diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e4f5ea7451d..ff69eb9cc453 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,5 @@ +## UNRELEASED + ## 1.2.3 (September 13, 2018) FEATURES: @@ -6,6 +8,7 @@ FEATURES: * http: Added support for "Authorization: Bearer" head in addition to the X-Consul-Token header. [[GH-4483](https://github.com/hashicorp/consul/issues/4483)] * dns: Added a way to specify SRV weights for each service instance to allow weighted DNS load-balancing. [[GH-4198](https://github.com/hashicorp/consul/pull/4198)] * dns: Include EDNS-ECS options in EDNS responses where appropriate: see [RFC 7871](https://tools.ietf.org/html/rfc7871#section-7.1.3) [[GH-4647](https://github.com/hashicorp/consul/pull/4647)] +* ui: Add markers/icons for external sources [[GH-4640]](https://github.com/hashicorp/consul/pull/4640) IMPROVEMENTS: @@ -13,6 +16,7 @@ IMPROVEMENTS: * connect: TLS certificate readiness now performs x509 certificate verification to determine whether the cert is usable. [[GH-4540](https://github.com/hashicorp/consul/pull/4540)] * ui: The syntax highlighting/code editor is now on by default [[GH-4651]](https://github.com/hashicorp/consul/pull/4651) * ui: Fallback to showing `Node.Address` if `Service.Address` is not set [[GH-4579]](https://github.com/hashicorp/consul/issues/4579) +* gossip: Improvements to Serf and memberlist improving gossip stability on very large clusters (over 35k tested) [[GH-4511](https://github.com/hashicorp/consul/pull/4511)] BUG FIXES: * agent: Avoid returning empty data on startup of a non-leader server [[GH-4554](https://github.com/hashicorp/consul/pull/4554)] diff --git a/GNUmakefile b/GNUmakefile old mode 100644 new mode 100755 index b7e5037d8a81..f199103df0d3 --- a/GNUmakefile +++ b/GNUmakefile @@ -189,7 +189,7 @@ test-internal: @awk '/^[^[:space:]]/ {do_print=0} /--- SKIP/ {do_print=1} do_print==1 {print}' test.log @awk '/^[^[:space:]]/ {do_print=0} /--- FAIL/ {do_print=1} do_print==1 {print}' test.log @grep '^FAIL' test.log || true - @if [ "$$(cat exit-code)" == "0" ] ; then echo "PASS" ; exit 0 ; else exit 1 ; fi + @if [ "$$(cat exit-code)" == "0" ] ; then echo "PASS" ; exit 0 ; else echo FAILED; tail -n50 test.log; exit 1 ; fi test-race: $(MAKE) GOTEST_FLAGS=-race diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 33bd610f8f29..c0c2ff29173d 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -531,6 +531,126 @@ func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Reques return nil, nil } +func AgentHealthService(serviceId string, s *HTTPServer) (int, string) { + checks := s.agent.State.Checks() + serviceChecks := make(api.HealthChecks, 0) + for _, c := range checks { + if c.ServiceID == serviceId || c.ServiceID == "" { + // TODO: harmonize struct.HealthCheck and api.HealthCheck (or at least extract conversion function) + healthCheck := &api.HealthCheck{ + Node: c.Node, + CheckID: string(c.CheckID), + Name: c.Name, + Status: c.Status, + Notes: c.Notes, + Output: c.Output, + ServiceID: c.ServiceID, + ServiceName: c.ServiceName, + ServiceTags: c.ServiceTags, + } + serviceChecks = append(serviceChecks, healthCheck) + } + } + status := serviceChecks.AggregatedStatus() + switch status { + case api.HealthWarning: + return http.StatusTooManyRequests, status + case api.HealthPassing: + return http.StatusOK, status + default: + return http.StatusServiceUnavailable, status + } +} + +func returnTextPlain(req *http.Request) bool { + if contentType := req.Header.Get("Accept"); 
strings.HasPrefix(contentType, "text/plain") { + return true + } + if format := req.URL.Query().Get("format"); format == "text" { + return true + } + return false +} + +func (s *HTTPServer) AgentHealthServiceId(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Pull out the service id (service id since there may be several instance of the same service on this host) + serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/health/service/id/") + if serviceID == "" { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprint(resp, "Missing service id") + return nil, nil + } + services := s.agent.State.Services() + for _, service := range services { + if service.ID == serviceID { + code, status := AgentHealthService(serviceID, s) + if returnTextPlain(req) { + resp.WriteHeader(code) + fmt.Fprint(resp, status) + return nil, nil + } + resp.Header().Add("Content-Type", "application/json") + resp.WriteHeader(code) + result := make(map[string]*structs.NodeService) + result[status] = service + return result, nil + } + } + if returnTextPlain(req) { + resp.WriteHeader(http.StatusNotFound) + fmt.Fprintf(resp, "ServiceId %s not found", serviceID) + return nil, nil + } + resp.Header().Add("Content-Type", "application/json") + resp.WriteHeader(http.StatusNotFound) + result := make(map[string]*structs.NodeService) + return result, nil +} + +func (s *HTTPServer) AgentHealthServiceName(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + + // Pull out the service name + serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/health/service/name/") + if serviceName == "" { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprint(resp, "Missing service name") + return nil, nil + } + code := http.StatusNotFound + status := fmt.Sprintf("ServiceName %s Not Found", serviceName) + services := s.agent.State.Services() + result := make(map[string][]*structs.NodeService) + for _, service := range services { + if service.Service == serviceName { + scode, sstatus := AgentHealthService(service.ID, s) + res, ok := result[sstatus] + if !ok { + res = make([]*structs.NodeService, 0, 4) + } + result[sstatus] = append(res, service) + // When service is not found, we ignore it and keep existing HTTP status + if code == http.StatusNotFound { + code = scode + status = sstatus + } + // We take the worst of all statuses, so we keep iterating + // passing: 200 < < warning: 429 < critical: 503 + if code < scode { + code = scode + status = sstatus + } + } + } + if returnTextPlain(req) { + resp.WriteHeader(code) + fmt.Fprint(resp, status) + return nil, nil + } + resp.Header().Add("Content-Type", "application/json") + resp.WriteHeader(code) + return result, nil +} + func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) { var args structs.ServiceDefinition // Fixup the type decode of TTL or Interval if a check if provided. 
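For context on the handlers added above (not part of the patch): a minimal, hypothetical client-side sketch of how a load balancer or sidecar might consume the new aggregated health endpoint. The agent address 127.0.0.1:8500 and the service name `web` are illustrative assumptions; the status-code mapping (200/429/503/404) mirrors AgentHealthService and the docs below.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Query the aggregated health of every local instance of the "web" service,
	// asking for the plain-text body ("passing", "warning", "critical", ...).
	resp, err := http.Get("http://127.0.0.1:8500/v1/agent/health/service/name/web?format=text")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)

	switch resp.StatusCode {
	case http.StatusOK: // 200: all checks passing
		fmt.Println("healthy:", string(body))
	case http.StatusTooManyRequests: // 429: at least one warning check
		fmt.Println("degraded:", string(body))
	case http.StatusServiceUnavailable: // 503: at least one critical check
		fmt.Println("critical:", string(body))
	case http.StatusNotFound: // 404: no service with that name on this agent
		fmt.Println("service not registered")
	default:
		fmt.Println("unexpected status:", resp.StatusCode)
	}
}
```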
diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index aedb3cadb566..fb03bc670709 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -191,6 +191,425 @@ func TestAgent_Checks(t *testing.T) { } } +func TestAgent_Health_Service_Id(t *testing.T) { + t.Parallel() + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + service := &structs.NodeService{ + ID: "mysql", + Service: "mysql", + } + if err := a.AddService(service, nil, false, ""); err != nil { + t.Fatalf("err: %v", err) + } + service = &structs.NodeService{ + ID: "mysql2", + Service: "mysql2", + } + if err := a.AddService(service, nil, false, ""); err != nil { + t.Fatalf("err: %v", err) + } + service = &structs.NodeService{ + ID: "mysql3", + Service: "mysql3", + } + if err := a.AddService(service, nil, false, ""); err != nil { + t.Fatalf("err: %v", err) + } + + chk1 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql", + Name: "mysql", + ServiceID: "mysql", + Status: api.HealthPassing, + } + err := a.State.AddCheck(chk1, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk2 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql", + Name: "mysql", + ServiceID: "mysql", + Status: api.HealthPassing, + } + err = a.State.AddCheck(chk2, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk3 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql2", + Name: "mysql2", + ServiceID: "mysql2", + Status: api.HealthPassing, + } + err = a.State.AddCheck(chk3, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk4 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql2", + Name: "mysql2", + ServiceID: "mysql2", + Status: api.HealthWarning, + } + err = a.State.AddCheck(chk4, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk5 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql3", + Name: "mysql3", + ServiceID: "mysql3", + Status: api.HealthMaint, + } + err = a.State.AddCheck(chk5, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk6 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql3", + Name: "mysql3", + ServiceID: "mysql3", + Status: api.HealthCritical, + } + err = a.State.AddCheck(chk6, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + eval := func(t *testing.T, url string, expectedCode int, expected string) { + t.Run("format=text", func(t *testing.T) { + req, _ := http.NewRequest("GET", url+"?format=text", nil) + resp := httptest.NewRecorder() + _, err := a.srv.AgentHealthServiceId(resp, req) + if err != nil { + t.Fatalf("Err: %v", err) + } + if got, want := resp.Code, expectedCode; got != want { + t.Fatalf("returned bad status: %d. Body: %q", resp.Code, resp.Body.String()) + } + if got, want := resp.Body.String(), expected; got != want { + t.Fatalf("got body %q want %q", got, want) + } + }) + t.Run("format=json", func(t *testing.T) { + req, _ := http.NewRequest("GET", url, nil) + resp := httptest.NewRecorder() + dataRaw, err := a.srv.AgentHealthServiceId(resp, req) + if err != nil { + t.Fatalf("Err: %v", err) + } + data, ok := dataRaw.(map[string]*structs.NodeService) + if !ok { + t.Fatalf("Cannot connvert result to JSON") + } + if got, want := resp.Code, expectedCode; got != want { + t.Fatalf("returned bad status: %d. 
Body: %#v", resp.Code, data) + } + if resp.Code != http.StatusNotFound { + if _, ok := data[expected]; !ok { + t.Fatalf("got body %v want %v", data, expected) + } + } + }) + } + + t.Run("passing checks", func(t *testing.T) { + eval(t, "/v1/agent/health/service/id/mysql", 200, "passing") + }) + t.Run("warning checks", func(t *testing.T) { + eval(t, "/v1/agent/health/service/id/mysql2", 429, "warning") + }) + t.Run("critical checks", func(t *testing.T) { + eval(t, "/v1/agent/health/service/id/mysql3", 503, "critical") + }) + t.Run("unknown serviceid", func(t *testing.T) { + eval(t, "/v1/agent/health/service/id/mysql1", 404, "ServiceId mysql1 not found") + }) + + nodeCheck := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "diskCheck", + Name: "diskCheck", + Status: api.HealthCritical, + } + err = a.State.AddCheck(nodeCheck, "") + + if err != nil { + t.Fatalf("Err: %v", err) + } + t.Run("critical check on node", func(t *testing.T) { + eval(t, "/v1/agent/health/service/id/mysql", 503, "critical") + }) + + err = a.State.RemoveCheck(nodeCheck.CheckID) + if err != nil { + t.Fatalf("Err: %v", err) + } + nodeCheck = &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "_node_maintenance", + Name: "_node_maintenance", + Status: api.HealthMaint, + } + err = a.State.AddCheck(nodeCheck, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + t.Run("maintenance check on node", func(t *testing.T) { + eval(t, "/v1/agent/health/service/id/mysql", 503, "maintenance") + }) +} + +func TestAgent_Health_Service_Name(t *testing.T) { + t.Parallel() + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + service := &structs.NodeService{ + ID: "mysql1", + Service: "mysql-pool-r", + } + if err := a.AddService(service, nil, false, ""); err != nil { + t.Fatalf("err: %v", err) + } + service = &structs.NodeService{ + ID: "mysql2", + Service: "mysql-pool-r", + } + if err := a.AddService(service, nil, false, ""); err != nil { + t.Fatalf("err: %v", err) + } + service = &structs.NodeService{ + ID: "mysql3", + Service: "mysql-pool-rw", + } + if err := a.AddService(service, nil, false, ""); err != nil { + t.Fatalf("err: %v", err) + } + service = &structs.NodeService{ + ID: "mysql4", + Service: "mysql-pool-rw", + } + if err := a.AddService(service, nil, false, ""); err != nil { + t.Fatalf("err: %v", err) + } + service = &structs.NodeService{ + ID: "httpd1", + Service: "httpd", + } + if err := a.AddService(service, nil, false, ""); err != nil { + t.Fatalf("err: %v", err) + } + service = &structs.NodeService{ + ID: "httpd2", + Service: "httpd", + } + if err := a.AddService(service, nil, false, ""); err != nil { + t.Fatalf("err: %v", err) + } + + chk1 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql1", + Name: "mysql1", + ServiceID: "mysql1", + ServiceName: "mysql-pool-r", + Status: api.HealthPassing, + } + err := a.State.AddCheck(chk1, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk2 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql1", + Name: "mysql1", + ServiceID: "mysql1", + ServiceName: "mysql-pool-r", + Status: api.HealthWarning, + } + err = a.State.AddCheck(chk2, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk3 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql2", + Name: "mysql2", + ServiceID: "mysql2", + ServiceName: "mysql-pool-r", + Status: api.HealthPassing, + } + err = a.State.AddCheck(chk3, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk4 := &structs.HealthCheck{ + Node: a.Config.NodeName, + 
CheckID: "mysql2", + Name: "mysql2", + ServiceID: "mysql2", + ServiceName: "mysql-pool-r", + Status: api.HealthCritical, + } + err = a.State.AddCheck(chk4, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk5 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql3", + Name: "mysql3", + ServiceID: "mysql3", + ServiceName: "mysql-pool-rw", + Status: api.HealthWarning, + } + err = a.State.AddCheck(chk5, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk6 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "mysql4", + Name: "mysql4", + ServiceID: "mysql4", + ServiceName: "mysql-pool-rw", + Status: api.HealthPassing, + } + err = a.State.AddCheck(chk6, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk7 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "httpd1", + Name: "httpd1", + ServiceID: "httpd1", + ServiceName: "httpd", + Status: api.HealthPassing, + } + err = a.State.AddCheck(chk7, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + chk8 := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "httpd2", + Name: "httpd2", + ServiceID: "httpd2", + ServiceName: "httpd", + Status: api.HealthPassing, + } + err = a.State.AddCheck(chk8, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + + eval := func(t *testing.T, url string, expectedCode int, expected string) { + t.Run("format=text", func(t *testing.T) { + req, _ := http.NewRequest("GET", url+"?format=text", nil) + resp := httptest.NewRecorder() + _, err := a.srv.AgentHealthServiceName(resp, req) + if err != nil { + t.Fatalf("Err: %v", err) + } + if got, want := resp.Code, expectedCode; got != want { + t.Fatalf("returned bad status: %d. Body: %q", resp.Code, resp.Body.String()) + } + if got, want := resp.Body.String(), expected; got != want { + t.Fatalf("got body %q want %q", got, want) + } + }) + t.Run("format=json", func(t *testing.T) { + req, _ := http.NewRequest("GET", url, nil) + resp := httptest.NewRecorder() + dataRaw, err := a.srv.AgentHealthServiceName(resp, req) + if err != nil { + t.Fatalf("Err: %v", err) + } + data, ok := dataRaw.(map[string][]*structs.NodeService) + if !ok { + t.Fatalf("Cannot connvert result to JSON") + } + if got, want := resp.Code, expectedCode; got != want { + t.Fatalf("returned bad status: %d. 
Body: %#v", resp.Code, data) + } + if resp.Code != http.StatusNotFound { + if _, ok := data[expected]; !ok { + t.Fatalf("got body %v want %v", data, expected) + } + } + }) + } + + t.Run("passing checks", func(t *testing.T) { + eval(t, "/v1/agent/health/service/name/httpd", 200, "passing") + }) + t.Run("warning checks", func(t *testing.T) { + eval(t, "/v1/agent/health/service/name/mysql-pool-rw", 429, "warning") + }) + t.Run("critical checks", func(t *testing.T) { + eval(t, "/v1/agent/health/service/name/mysql-pool-r", 503, "critical") + }) + t.Run("unknown serviceName", func(t *testing.T) { + eval(t, "/v1/agent/health/service/name/test", 404, "ServiceName test Not Found") + }) + nodeCheck := &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "diskCheck", + Name: "diskCheck", + Status: api.HealthCritical, + } + err = a.State.AddCheck(nodeCheck, "") + + if err != nil { + t.Fatalf("Err: %v", err) + } + t.Run("critical check on node", func(t *testing.T) { + eval(t, "/v1/agent/health/service/name/mysql-pool-r", 503, "critical") + }) + + err = a.State.RemoveCheck(nodeCheck.CheckID) + if err != nil { + t.Fatalf("Err: %v", err) + } + nodeCheck = &structs.HealthCheck{ + Node: a.Config.NodeName, + CheckID: "_node_maintenance", + Name: "_node_maintenance", + Status: api.HealthMaint, + } + err = a.State.AddCheck(nodeCheck, "") + if err != nil { + t.Fatalf("Err: %v", err) + } + t.Run("maintenance check on node", func(t *testing.T) { + eval(t, "/v1/agent/health/service/name/mysql-pool-r", 503, "maintenance") + }) +} + func TestAgent_Checks_ACLFilter(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), TestACLConfig()) diff --git a/agent/catalog_endpoint_test.go b/agent/catalog_endpoint_test.go index 24382c0fff78..7ba7882752f2 100644 --- a/agent/catalog_endpoint_test.go +++ b/agent/catalog_endpoint_test.go @@ -409,6 +409,7 @@ func TestCatalogServices(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") // Register node args := &structs.RegisterRequest{ @@ -678,6 +679,7 @@ func TestCatalogServiceNodes_DistanceSort(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") // Register nodes. args := &structs.RegisterRequest{ @@ -769,6 +771,7 @@ func TestCatalogServiceNodes_ConnectProxy(t *testing.T) { assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") // Register args := structs.TestRegisterRequestProxy(t) @@ -795,6 +798,7 @@ func TestCatalogConnectServiceNodes_good(t *testing.T) { assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") // Register args := structs.TestRegisterRequestProxy(t) diff --git a/agent/connect/ca/provider.go b/agent/connect/ca/provider.go index f7daa5fc7104..a812d6411318 100644 --- a/agent/connect/ca/provider.go +++ b/agent/connect/ca/provider.go @@ -8,7 +8,16 @@ import ( // an external CA that provides leaf certificate signing for // given SpiffeIDServices. type Provider interface { - // Active root returns the currently active root CA for this + // Configure initializes the provider based on the given cluster ID, root status + // and configuration values. + Configure(clusterId string, isRoot bool, rawConfig map[string]interface{}) error + + // GenerateRoot causes the creation of a new root certificate for this provider. + // This can also be a no-op if a root certificate already exists for the given + // config. 
If isRoot is false, calling this method is an error. + GenerateRoot() error + + // ActiveRoot returns the currently active root CA for this // provider. This should be a parent of the certificate returned by // ActiveIntermediate() ActiveRoot() (string, error) diff --git a/agent/connect/ca/provider_consul.go b/agent/connect/ca/provider_consul.go index 5cbf744ab8ad..803ce2ac188d 100644 --- a/agent/connect/ca/provider_consul.go +++ b/agent/connect/ca/provider_consul.go @@ -3,12 +3,15 @@ package ca import ( "bytes" "crypto/rand" + "crypto/sha256" "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "errors" "fmt" "math/big" "net/url" + "strings" "sync" "time" @@ -17,10 +20,14 @@ import ( "github.com/hashicorp/consul/agent/structs" ) +var ErrNotInitialized = errors.New("provider not initialized") + type ConsulProvider struct { - config *structs.ConsulCAProviderConfig - id string - delegate ConsulProviderStateDelegate + Delegate ConsulProviderStateDelegate + + config *structs.ConsulCAProviderConfig + id string + isRoot bool sync.RWMutex } @@ -29,73 +36,126 @@ type ConsulProviderStateDelegate interface { ApplyCARequest(*structs.CARequest) error } -// NewConsulProvider returns a new instance of the Consul CA provider, -// bootstrapping its state in the state store necessary -func NewConsulProvider(rawConfig map[string]interface{}, delegate ConsulProviderStateDelegate) (*ConsulProvider, error) { - conf, err := ParseConsulCAConfig(rawConfig) +// Configure sets up the provider using the given configuration. +func (c *ConsulProvider) Configure(clusterID string, isRoot bool, rawConfig map[string]interface{}) error { + // Parse the raw config and update our ID. + config, err := ParseConsulCAConfig(rawConfig) if err != nil { - return nil, err - } - provider := &ConsulProvider{ - config: conf, - delegate: delegate, - id: fmt.Sprintf("%s,%s", conf.PrivateKey, conf.RootCert), + return err } + c.config = config + c.isRoot = isRoot + hash := sha256.Sum256([]byte(fmt.Sprintf("%s,%s,%v", config.PrivateKey, config.RootCert, isRoot))) + c.id = strings.Replace(fmt.Sprintf("% x", hash), " ", ":", -1) - // Check if this configuration of the provider has already been - // initialized in the state store. - state := delegate.State() - _, providerState, err := state.CAProviderState(provider.id) + // Exit early if the state store has an entry for this provider's config. + _, providerState, err := c.Delegate.State().CAProviderState(c.id) if err != nil { - return nil, err + return err } - // Exit early if the state store has already been populated for this config. if providerState != nil { - return provider, nil + return nil } - newState := structs.CAConsulProviderState{ - ID: provider.id, + // Check if there's an entry with the old ID scheme. + oldID := fmt.Sprintf("%s,%s", config.PrivateKey, config.RootCert) + _, providerState, err = c.Delegate.State().CAProviderState(oldID) + if err != nil { + return err } - // Write the initial provider state to get the index to use for the - // CA serial number. - { - args := &structs.CARequest{ + // Found an entry with the old ID, so update it to the new ID and + // delete the old entry. 
+ if providerState != nil { + newState := *providerState + newState.ID = c.id + createReq := &structs.CARequest{ Op: structs.CAOpSetProviderState, ProviderState: &newState, } - if err := delegate.ApplyCARequest(args); err != nil { - return nil, err + if err := c.Delegate.ApplyCARequest(createReq); err != nil { + return err + } + + deleteReq := &structs.CARequest{ + Op: structs.CAOpDeleteProviderState, + ProviderState: providerState, } + if err := c.Delegate.ApplyCARequest(deleteReq); err != nil { + return err + } + + return nil } - idx, _, err := state.CAProviderState(provider.id) + // Write the provider state to the state store. + newState := structs.CAConsulProviderState{ + ID: c.id, + } + + args := &structs.CARequest{ + Op: structs.CAOpSetProviderState, + ProviderState: &newState, + } + if err := c.Delegate.ApplyCARequest(args); err != nil { + return err + } + + return nil +} + +// ActiveRoot returns the active root CA certificate. +func (c *ConsulProvider) ActiveRoot() (string, error) { + state := c.Delegate.State() + _, providerState, err := state.CAProviderState(c.id) if err != nil { - return nil, err + return "", err + } + + return providerState.RootCert, nil +} + +// GenerateRoot initializes a new root certificate and private key +// if needed. +func (c *ConsulProvider) GenerateRoot() error { + state := c.Delegate.State() + idx, providerState, err := state.CAProviderState(c.id) + if err != nil { + return err + } + + if providerState == nil { + return ErrNotInitialized + } + if !c.isRoot { + return fmt.Errorf("provider is not the root certificate authority") + } + if providerState.RootCert != "" { + return nil } // Generate a private key if needed - if conf.PrivateKey == "" { + newState := *providerState + if c.config.PrivateKey == "" { _, pk, err := connect.GeneratePrivateKey() if err != nil { - return nil, err + return err } newState.PrivateKey = pk } else { - newState.PrivateKey = conf.PrivateKey + newState.PrivateKey = c.config.PrivateKey } // Generate the root CA if necessary - if conf.RootCert == "" { - ca, err := provider.generateCA(newState.PrivateKey, idx+1) + if c.config.RootCert == "" { + ca, err := c.generateCA(newState.PrivateKey, idx+1) if err != nil { - return nil, fmt.Errorf("error generating CA: %v", err) + return fmt.Errorf("error generating CA: %v", err) } newState.RootCert = ca } else { - newState.RootCert = conf.RootCert + newState.RootCert = c.config.RootCert } // Write the provider state @@ -103,22 +163,11 @@ func NewConsulProvider(rawConfig map[string]interface{}, delegate ConsulProvider Op: structs.CAOpSetProviderState, ProviderState: &newState, } - if err := delegate.ApplyCARequest(args); err != nil { - return nil, err - } - - return provider, nil -} - -// Return the active root CA and generate a new one if needed -func (c *ConsulProvider) ActiveRoot() (string, error) { - state := c.delegate.State() - _, providerState, err := state.CAProviderState(c.id) - if err != nil { - return "", err + if err := c.Delegate.ApplyCARequest(args); err != nil { + return err } - return providerState.RootCert, nil + return nil } // We aren't maintaining separate root/intermediate CAs for the builtin @@ -139,7 +188,7 @@ func (c *ConsulProvider) Cleanup() error { Op: structs.CAOpDeleteProviderState, ProviderState: &structs.CAConsulProviderState{ID: c.id}, } - if err := c.delegate.ApplyCARequest(args); err != nil { + if err := c.Delegate.ApplyCARequest(args); err != nil { return err } @@ -155,7 +204,7 @@ func (c *ConsulProvider) Sign(csr *x509.CertificateRequest) (string, error) 
{ defer c.Unlock() // Get the provider state - state := c.delegate.State() + state := c.Delegate.State() idx, providerState, err := state.CAProviderState(c.id) if err != nil { return "", err @@ -247,7 +296,7 @@ func (c *ConsulProvider) CrossSignCA(cert *x509.Certificate) (string, error) { defer c.Unlock() // Get the provider state - state := c.delegate.State() + state := c.Delegate.State() idx, providerState, err := state.CAProviderState(c.id) if err != nil { return "", err @@ -315,7 +364,7 @@ func (c *ConsulProvider) incrementProviderIndex(providerState *structs.CAConsulP Op: structs.CAOpSetProviderState, ProviderState: &newState, } - if err := c.delegate.ApplyCARequest(args); err != nil { + if err := c.Delegate.ApplyCARequest(args); err != nil { return err } @@ -324,7 +373,7 @@ func (c *ConsulProvider) incrementProviderIndex(providerState *structs.CAConsulP // generateCA makes a new root CA using the current private key func (c *ConsulProvider) generateCA(privateKey string, sn uint64) (string, error) { - state := c.delegate.State() + state := c.Delegate.State() _, config, err := state.CAConfig() if err != nil { return "", err @@ -348,9 +397,9 @@ func (c *ConsulProvider) generateCA(privateKey string, sn uint64) (string, error serialNum := &big.Int{} serialNum.SetUint64(sn) template := x509.Certificate{ - SerialNumber: serialNum, - Subject: pkix.Name{CommonName: name}, - URIs: []*url.URL{id.URI()}, + SerialNumber: serialNum, + Subject: pkix.Name{CommonName: name}, + URIs: []*url.URL{id.URI()}, BasicConstraintsValid: true, KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign | diff --git a/agent/connect/ca/provider_consul_test.go b/agent/connect/ca/provider_consul_test.go index 2092d934d485..26f044e30a1b 100644 --- a/agent/connect/ca/provider_consul_test.go +++ b/agent/connect/ca/provider_consul_test.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -75,32 +74,33 @@ func testConsulCAConfig() *structs.CAConfiguration { func TestConsulCAProvider_Bootstrap(t *testing.T) { t.Parallel() - assert := assert.New(t) + require := require.New(t) conf := testConsulCAConfig() delegate := newMockDelegate(t, conf) - provider, err := NewConsulProvider(conf.Config, delegate) - assert.NoError(err) + provider := &ConsulProvider{Delegate: delegate} + require.NoError(provider.Configure(conf.ClusterID, true, conf.Config)) + require.NoError(provider.GenerateRoot()) root, err := provider.ActiveRoot() - assert.NoError(err) + require.NoError(err) // Intermediate should be the same cert. inter, err := provider.ActiveIntermediate() - assert.NoError(err) - assert.Equal(root, inter) + require.NoError(err) + require.Equal(root, inter) // Should be a valid cert parsed, err := connect.ParseCert(root) - assert.NoError(err) - assert.Equal(parsed.URIs[0].String(), fmt.Sprintf("spiffe://%s.consul", conf.ClusterID)) + require.NoError(err) + require.Equal(parsed.URIs[0].String(), fmt.Sprintf("spiffe://%s.consul", conf.ClusterID)) } func TestConsulCAProvider_Bootstrap_WithCert(t *testing.T) { t.Parallel() // Make sure setting a custom private key/root cert works. 
- assert := assert.New(t) + require := require.New(t) rootCA := connect.TestCA(t, nil) conf := testConsulCAConfig() conf.Config = map[string]interface{}{ @@ -109,12 +109,13 @@ func TestConsulCAProvider_Bootstrap_WithCert(t *testing.T) { } delegate := newMockDelegate(t, conf) - provider, err := NewConsulProvider(conf.Config, delegate) - assert.NoError(err) + provider := &ConsulProvider{Delegate: delegate} + require.NoError(provider.Configure(conf.ClusterID, true, conf.Config)) + require.NoError(provider.GenerateRoot()) root, err := provider.ActiveRoot() - assert.NoError(err) - assert.Equal(root, rootCA.RootCert) + require.NoError(err) + require.Equal(root, rootCA.RootCert) } func TestConsulCAProvider_SignLeaf(t *testing.T) { @@ -125,8 +126,9 @@ func TestConsulCAProvider_SignLeaf(t *testing.T) { conf.Config["LeafCertTTL"] = "1h" delegate := newMockDelegate(t, conf) - provider, err := NewConsulProvider(conf.Config, delegate) - require.NoError(err) + provider := &ConsulProvider{Delegate: delegate} + require.NoError(provider.Configure(conf.ClusterID, true, conf.Config)) + require.NoError(provider.GenerateRoot()) spiffeService := &connect.SpiffeIDService{ Host: "node1", @@ -183,17 +185,20 @@ func TestConsulCAProvider_SignLeaf(t *testing.T) { func TestConsulCAProvider_CrossSignCA(t *testing.T) { t.Parallel() + require := require.New(t) conf1 := testConsulCAConfig() delegate1 := newMockDelegate(t, conf1) - provider1, err := NewConsulProvider(conf1.Config, delegate1) - require.NoError(t, err) + provider1 := &ConsulProvider{Delegate: delegate1} + require.NoError(provider1.Configure(conf1.ClusterID, true, conf1.Config)) + require.NoError(provider1.GenerateRoot()) conf2 := testConsulCAConfig() conf2.CreateIndex = 10 delegate2 := newMockDelegate(t, conf2) - provider2, err := NewConsulProvider(conf2.Config, delegate2) - require.NoError(t, err) + provider2 := &ConsulProvider{Delegate: delegate2} + require.NoError(provider2.Configure(conf2.ClusterID, true, conf2.Config)) + require.NoError(provider2.GenerateRoot()) testCrossSignProviders(t, provider1, provider2) } @@ -269,3 +274,32 @@ func testCrossSignProviders(t *testing.T, provider1, provider2 Provider) { require.NoError(err) } } + +func TestConsulCAProvider_MigrateOldID(t *testing.T) { + t.Parallel() + + require := require.New(t) + conf := testConsulCAConfig() + delegate := newMockDelegate(t, conf) + + // Create an entry with an old-style ID. + err := delegate.ApplyCARequest(&structs.CARequest{ + Op: structs.CAOpSetProviderState, + ProviderState: &structs.CAConsulProviderState{ + ID: ",", + }, + }) + require.NoError(err) + _, providerState, err := delegate.state.CAProviderState(",") + require.NoError(err) + require.NotNil(providerState) + + provider := &ConsulProvider{Delegate: delegate} + require.NoError(provider.Configure(conf.ClusterID, true, conf.Config)) + require.NoError(provider.GenerateRoot()) + + // After running Configure, the old ID entry should be gone. 
+ _, providerState, err = delegate.state.CAProviderState(",") + require.NoError(err) + require.Nil(providerState) +} diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go index eaf36460906c..cbbccd7f8b40 100644 --- a/agent/connect/ca/provider_vault.go +++ b/agent/connect/ca/provider_vault.go @@ -24,41 +24,50 @@ var ErrBackendNotInitialized = fmt.Errorf("backend not initialized") type VaultProvider struct { config *structs.VaultCAProviderConfig client *vaultapi.Client + isRoot bool clusterId string } -// NewVaultProvider returns a vault provider with its root and intermediate PKI -// backends mounted and initialized. If the root backend is not set up already, -// it will be mounted/generated as needed, but any existing state will not be -// overwritten. -func NewVaultProvider(rawConfig map[string]interface{}, clusterId string) (*VaultProvider, error) { - conf, err := ParseVaultCAConfig(rawConfig) +// Configure sets up the provider using the given configuration. +func (v *VaultProvider) Configure(clusterId string, isRoot bool, rawConfig map[string]interface{}) error { + config, err := ParseVaultCAConfig(rawConfig) if err != nil { - return nil, err + return err } - // todo(kyhavlov): figure out the right way to pass the TLS config clientConf := &vaultapi.Config{ - Address: conf.Address, + Address: config.Address, } client, err := vaultapi.NewClient(clientConf) if err != nil { - return nil, err + return err } - client.SetToken(conf.Token) + client.SetToken(config.Token) + v.config = config + v.client = client + v.isRoot = isRoot + v.clusterId = clusterId + + return nil +} + +// ActiveRoot returns the active root CA certificate. +func (v *VaultProvider) ActiveRoot() (string, error) { + return v.getCA(v.config.RootPKIPath) +} - provider := &VaultProvider{ - config: conf, - client: client, - clusterId: clusterId, +// GenerateRoot mounts and initializes a new root PKI backend if needed. +func (v *VaultProvider) GenerateRoot() error { + if !v.isRoot { + return fmt.Errorf("provider is not the root certificate authority") } // Set up the root PKI backend if necessary. - _, err = provider.ActiveRoot() + _, err := v.ActiveRoot() switch err { case ErrBackendNotMounted: - err := client.Sys().Mount(conf.RootPKIPath, &vaultapi.MountInput{ + err := v.client.Sys().Mount(v.config.RootPKIPath, &vaultapi.MountInput{ Type: "pki", Description: "root CA backend for Consul Connect", Config: vaultapi.MountConfigInput{ @@ -67,41 +76,33 @@ func NewVaultProvider(rawConfig map[string]interface{}, clusterId string) (*Vaul }) if err != nil { - return nil, err + return err } fallthrough case ErrBackendNotInitialized: - spiffeID := connect.SpiffeIDSigning{ClusterID: clusterId, Domain: "consul"} + spiffeID := connect.SpiffeIDSigning{ClusterID: v.clusterId, Domain: "consul"} uuid, err := uuid.GenerateUUID() if err != nil { - return nil, err + return err } - _, err = client.Logical().Write(conf.RootPKIPath+"root/generate/internal", map[string]interface{}{ + _, err = v.client.Logical().Write(v.config.RootPKIPath+"root/generate/internal", map[string]interface{}{ "common_name": fmt.Sprintf("Vault CA Root Authority %s", uuid), "uri_sans": spiffeID.URI().String(), }) if err != nil { - return nil, err + return err } default: if err != nil { - return nil, err + return err } } - // Set up the intermediate backend. 
- if _, err := provider.GenerateIntermediate(); err != nil { - return nil, err - } - - return provider, nil -} - -func (v *VaultProvider) ActiveRoot() (string, error) { - return v.getCA(v.config.RootPKIPath) + return nil } +// ActiveIntermediate returns the current intermediate certificate. func (v *VaultProvider) ActiveIntermediate() (string, error) { return v.getCA(v.config.IntermediatePKIPath) } diff --git a/agent/connect/ca/provider_vault_test.go b/agent/connect/ca/provider_vault_test.go index 05f8c36448c0..eee9e9544cec 100644 --- a/agent/connect/ca/provider_vault_test.go +++ b/agent/connect/ca/provider_vault_test.go @@ -39,10 +39,12 @@ func testVaultClusterWithConfig(t *testing.T, rawConf map[string]interface{}) (* conf[k] = v } - provider, err := NewVaultProvider(conf, "asdf") - if err != nil { - t.Fatal(err) - } + require := require.New(t) + provider := &VaultProvider{} + require.NoError(provider.Configure("asdf", true, conf)) + require.NoError(provider.GenerateRoot()) + _, err := provider.GenerateIntermediate() + require.NoError(err) return provider, core, ln } diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 9a02d6763dc1..e61f2b80ced7 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -95,6 +95,12 @@ func (s *ConnectCA) ConfigurationSet( if err != nil { return fmt.Errorf("could not initialize provider: %v", err) } + if err := newProvider.Configure(args.Config.ClusterID, true, args.Config.Config); err != nil { + return fmt.Errorf("error configuring provider: %v", err) + } + if err := newProvider.GenerateRoot(); err != nil { + return fmt.Errorf("error generating CA root certificate: %v", err) + } newRootPEM, err := newProvider.ActiveRoot() if err != nil { diff --git a/agent/consul/leader.go b/agent/consul/leader.go index e959e365e0a8..31eff836951a 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -427,11 +427,17 @@ func (s *Server) initializeCA() error { return err } - // Initialize the right provider based on the config + // Initialize the provider based on the current config. 
provider, err := s.createCAProvider(conf) if err != nil { return err } + if err := provider.Configure(conf.ClusterID, true, conf.Config); err != nil { + return fmt.Errorf("error configuring provider: %v", err) + } + if err := provider.GenerateRoot(); err != nil { + return fmt.Errorf("error generating CA root certificate: %v", err) + } // Get the active root cert from the CA rootPEM, err := provider.ActiveRoot() @@ -520,9 +526,9 @@ func parseCARoot(pemValue, provider string) (*structs.CARoot, error) { func (s *Server) createCAProvider(conf *structs.CAConfiguration) (ca.Provider, error) { switch conf.Provider { case structs.ConsulCAProvider: - return ca.NewConsulProvider(conf.Config, &consulCADelegate{s}) + return &ca.ConsulProvider{Delegate: &consulCADelegate{s}}, nil case structs.VaultCAProvider: - return ca.NewVaultProvider(conf.Config, conf.ClusterID) + return &ca.VaultProvider{}, nil default: return nil, fmt.Errorf("unknown CA provider %q", conf.Provider) } diff --git a/agent/http_oss.go b/agent/http_oss.go index ac5eff335d2c..17c2b3e6075e 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -23,6 +23,8 @@ func init() { registerEndpoint("/v1/agent/join/", []string{"PUT"}, (*HTTPServer).AgentJoin) registerEndpoint("/v1/agent/leave", []string{"PUT"}, (*HTTPServer).AgentLeave) registerEndpoint("/v1/agent/force-leave/", []string{"PUT"}, (*HTTPServer).AgentForceLeave) + registerEndpoint("/v1/agent/health/service/id/", []string{"GET"}, (*HTTPServer).AgentHealthServiceId) + registerEndpoint("/v1/agent/health/service/name/", []string{"GET"}, (*HTTPServer).AgentHealthServiceName) registerEndpoint("/v1/agent/check/register", []string{"PUT"}, (*HTTPServer).AgentRegisterCheck) registerEndpoint("/v1/agent/check/deregister/", []string{"PUT"}, (*HTTPServer).AgentDeregisterCheck) registerEndpoint("/v1/agent/check/pass/", []string{"PUT"}, (*HTTPServer).AgentCheckPass) diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 76162c2f6563..82215d1b561f 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -252,9 +252,10 @@ type ConsulCAProviderConfig struct { // CAConsulProviderState is used to track the built-in Consul CA provider's state. 
type CAConsulProviderState struct { - ID string - PrivateKey string - RootCert string + ID string + PrivateKey string + RootCert string + IntermediateCert string RaftIndex } diff --git a/command/connect/proxy/register_test.go b/command/connect/proxy/register_test.go index 89567807a00e..9ff3dff0df7e 100644 --- a/command/connect/proxy/register_test.go +++ b/command/connect/proxy/register_test.go @@ -46,16 +46,18 @@ func TestRegisterMonitor_heartbeat(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") m, _ := testMonitor(t, client) defer m.Close() + retry.Run(t, func(r *retry.R) { + // Get the check and verify that it is passing + checks, err := client.Agent().Checks() + require.NoError(err) + require.Contains(checks, m.checkID()) + require.Equal("passing", checks[m.checkID()].Status) + // Purposely fail the TTL check, verify it becomes healthy again + require.NoError(client.Agent().FailTTL(m.checkID(), "")) + }) - // Get the check and verify that it is passing - checks, err := client.Agent().Checks() - require.NoError(err) - require.Contains(checks, m.checkID()) - require.Equal("passing", checks[m.checkID()].Status) - - // Purposely fail the TTL check, verify it becomes healthy again - require.NoError(client.Agent().FailTTL(m.checkID(), "")) retry.Run(t, func(r *retry.R) { + checks, err := client.Agent().Checks() if err != nil { r.Fatalf("err: %s", err) diff --git a/command/watch/watch_test.go b/command/watch/watch_test.go index ff6463852cd8..8f7e4c2b5064 100644 --- a/command/watch/watch_test.go +++ b/command/watch/watch_test.go @@ -4,9 +4,8 @@ import ( "strings" "testing" - "github.com/hashicorp/consul/testrpc" - "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/testrpc" "github.com/mitchellh/cli" ) diff --git a/ui/Gemfile b/ui/Gemfile index 77b884c352dc..ce0147581d34 100644 --- a/ui/Gemfile +++ b/ui/Gemfile @@ -1,6 +1,7 @@ # A sample Gemfile source "https://rubygems.org" +gem "ffi", "~> 1.9.24" gem "uglifier" gem "sass" gem "therubyracer" diff --git a/ui/Gemfile.lock b/ui/Gemfile.lock index 3221f95639d5..42d5a015a3c6 100644 --- a/ui/Gemfile.lock +++ b/ui/Gemfile.lock @@ -2,7 +2,7 @@ GEM remote: https://rubygems.org/ specs: execjs (2.7.0) - ffi (1.9.23) + ffi (1.9.25) libv8 (3.16.14.19) rb-fsevent (0.10.3) rb-inotify (0.9.10) @@ -23,9 +23,10 @@ PLATFORMS ruby DEPENDENCIES + ffi (~> 1.9.24) sass therubyracer uglifier BUNDLED WITH - 1.16.0 + 1.16.1 diff --git a/version/version.go b/version/version.go index 7e785656b9a5..5800c96e50d2 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ var ( // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. - VersionPrerelease = "" + VersionPrerelease = "dev" ) // GetHumanVersion composes the parts of the version in a way that's suitable diff --git a/website/source/api/agent/service.html.md b/website/source/api/agent/service.html.md index 5877238b37ac..73331540e51e 100644 --- a/website/source/api/agent/service.html.md +++ b/website/source/api/agent/service.html.md @@ -61,6 +61,191 @@ $ curl \ } ``` +## Get local service health + +The `/v1/agent/health/service/name/` and +`/v1/agent/health/service/id/` endpoints allow retrieving an +aggregated state of service(s) of the local agent. + +The endpoint `/v1/agent/health/service/name/` queries all +services with a given names (several may match), while +`/v1/agent/health/service/id/` will match a single service only. 
+
+If you know the ID of the service you want to target, it is recommended to use
+the `/v1/agent/health/service/id/` version so you get the result
+for that service only. When requesting
+`/v1/agent/health/service/name/`, the caller will receive the
+worst state of all services having the given name.
+
+Those endpoints support JSON and text/plain formats, JSON being the
+default. In order to get the text format, you can append `?format=text` to
+the URL or use MIME content negotiation by specifying an HTTP `Accept`
+header starting with `text/plain`.
+
+Those endpoints return the aggregated values of all healthchecks for the
+service and will return the corresponding HTTP codes:
+
+| Result | Meaning                                                 |
+| ------ | ------------------------------------------------------- |
+| `200`  | All healthchecks of this service are passing            |
+| `400`  | Bad parameter (missing service name or id)              |
+| `404`  | No such service id or name                              |
+| `429`  | Some healthchecks are passing, at least one is warning  |
+| `503`  | At least one of the healthchecks is critical            |
+
+Those endpoints might be useful for the following use cases:
+
+* a load-balancer wants to check IP connectivity with an agent and retrieve
+  the aggregated status of a given service
+* create aliases for a given service (the healthcheck of the alias then uses
+  the http://localhost:8500/v1/agent/service/id/aliased_service_id healthcheck)
+
+### Sample Requests
+
+Given 2 services with the name `web`, with web2 critical and web1 passing:
+
+#### List the worst status of all instances of the web service (HTTP 503)
+
+##### By Name, Text
+
+```shell
+curl http://localhost:8500/v1/agent/health/service/name/web?format=text
+critical
+```
+
+##### By Name, JSON
+
+In JSON, the detail of passing/warning/critical services is present in the
+output, in an array.
+
+```shell
+curl localhost:8500/v1/agent/health/service/name/web
+```
+
+```json
+{
+    "critical": [
+        {
+            "ID": "web2",
+            "Service": "web",
+            "Tags": [
+                "rails"
+            ],
+            "Address": "",
+            "Meta": null,
+            "Port": 80,
+            "EnableTagOverride": false,
+            "ProxyDestination": "",
+            "Connect": {
+                "Native": false,
+                "Proxy": null
+            },
+            "CreateIndex": 0,
+            "ModifyIndex": 0
+        }
+    ],
+    "passing": [
+        {
+            "ID": "web1",
+            "Service": "web",
+            "Tags": [
+                "rails"
+            ],
+            "Address": "",
+            "Meta": null,
+            "Port": 80,
+            "EnableTagOverride": false,
+            "ProxyDestination": "",
+            "Connect": {
+                "Native": false,
+                "Proxy": null
+            },
+            "CreateIndex": 0,
+            "ModifyIndex": 0
+        }
+    ]
+}
+```
+
+#### List status of web2 (HTTP 503)
+
+##### Failure By ID, Text
+
+```shell
+curl http://localhost:8500/v1/agent/health/service/id/web2?format=text
+critical
+```
+
+##### Failure By ID, JSON
+
+In JSON, the output per ID is not an array, but only contains the value
+of the service.
+ +```shell +curl localhost:8500/v1/agent/health/service/id/web2 +``` + +```json +{ + "critical": { + "ID": "web2", + "Service": "web", + "Tags": [ + "rails" + ], + "Address": "", + "Meta": null, + "Port": 80, + "EnableTagOverride": false, + "ProxyDestination": "", + "Connect": { + "Native": false, + "Proxy": null + }, + "CreateIndex": 0, + "ModifyIndex": 0 + } +} +``` + +#### List status of web2 (HTTP 200) + +##### Success By ID, Text + +```shell +curl localhost:8500/v1/agent/health/service/id/web1?format=text +passing +``` + +#### Success By ID, JSON + +```shell +curl localhost:8500/v1/agent/health/service/id/web1 +``` + +```json +{ + "passing": { + "ID": "web1", + "Service": "web", + "Tags": [ + "rails" + ], + "Address": "", + "Meta": null, + "Port": 80, + "EnableTagOverride": false, + "ProxyDestination": "", + "Connect": { + "Native": false, + "Proxy": null + }, + "CreateIndex": 0, + "ModifyIndex": 0 + } +} +``` + ## Register Service This endpoint adds a new service, with an optional health check, to the local diff --git a/website/source/docs/platform/k8s/index.html.md b/website/source/docs/platform/k8s/index.html.md index 31ef6c5f59cd..9b5a755be0fc 100644 --- a/website/source/docs/platform/k8s/index.html.md +++ b/website/source/docs/platform/k8s/index.html.md @@ -3,13 +3,13 @@ layout: "docs" page_title: "Kubernetes" sidebar_current: "docs-platform-k8s-index" description: |- - Consul has many integrations with Kubernetes. You can deploy Consul to Kubernetes using the Helm Chart, sync services between Consul and Kubernetes, automatically secure Pod communication with Connect, and more. This section documents the official integrations between Consul and Kubernetes. + Consul has many integrations with Kubernetes. You can deploy Consul to Kubernetes using the Helm chart, sync services between Consul and Kubernetes, automatically secure Pod communication with Connect, and more. This section documents the official integrations between Consul and Kubernetes. --- # Kubernetes Consul has many integrations with Kubernetes. You can deploy Consul -to Kubernetes using the Helm Chart, sync services between Consul and +to Kubernetes using the Helm chart, sync services between Consul and Kubernetes, automatically secure Pod communication with Connect, and more. This section documents the official integrations between Consul and Kubernetes. diff --git a/website/source/docs/platform/k8s/run.html.md b/website/source/docs/platform/k8s/run.html.md index eafb5776f129..15bf7e9143ff 100644 --- a/website/source/docs/platform/k8s/run.html.md +++ b/website/source/docs/platform/k8s/run.html.md @@ -20,17 +20,17 @@ down to the [architecture](/docs/platform/k8s/run.html#architecture) section. ## Helm Chart The recommended way to run Consul on Kubernetes is via the -[Helm Chart](/docs/platform/k8s/helm.html). This will install and configure +[Helm chart](/docs/platform/k8s/helm.html). This will install and configure all the necessary components to run Consul. The configuration enables you to run just a server cluster, just a client cluster, or both. Using the Helm -Chart, you can have a full Consul deployment up and running in seconds. +chart, you can have a full Consul deployment up and running in seconds. -While the Helm Chart exposes dozens of useful configurations and automatically +While the Helm chart exposes dozens of useful configurations and automatically sets up complex resources, it **does not automatically operate Consul.** You are still reponsible for learning how to monitor, backup, upgrade, etc. 
the Consul cluster. -The Helm Chart has no required configuration and will install a Consul +The Helm chart has no required configuration and will install a Consul cluster with sane defaults out of the box. Prior to going to production, it is highly recommended that you [learn about the configuration options](/docs/platform/k8s/helm.html#configuration-values-).
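As a closing reference (not part of the patch): the CA provider lifecycle after this change, pieced together from the `Provider` interface and the `leader.go` hunks above, is to construct the provider, `Configure` it, call `GenerateRoot`, and only then read `ActiveRoot`. This is a sketch under the assumption that `delegate` satisfies `ca.ConsulProviderStateDelegate`; the helper name `initConsulCA` is invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/connect/ca"
)

// initConsulCA mirrors the sequence the leader now runs: Configure replaces
// the old NewConsulProvider constructor, GenerateRoot bootstraps the root
// certificate if one does not exist yet, and ActiveRoot afterwards only reads
// the stored provider state.
func initConsulCA(delegate ca.ConsulProviderStateDelegate, clusterID string, rawConfig map[string]interface{}) (string, error) {
	provider := &ca.ConsulProvider{Delegate: delegate}
	if err := provider.Configure(clusterID, true, rawConfig); err != nil {
		return "", fmt.Errorf("error configuring provider: %v", err)
	}
	if err := provider.GenerateRoot(); err != nil {
		return "", fmt.Errorf("error generating CA root certificate: %v", err)
	}
	return provider.ActiveRoot()
}
```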