Merge branch 'asheshvidyut/NET-3865' of github.com:hashicorp/consul into asheshvidyut/NET-3865
absolutelightning committed Jun 8, 2023
2 parents 3631708 + c461f59 commit 5d479e8
Showing 51 changed files with 1,518 additions and 205 deletions.
4 changes: 4 additions & 0 deletions .changelog/17609.txt
@@ -0,0 +1,4 @@
```release-note:bug
gateways: Fixed a bug in API gateways where binding a route that only targets a service imported from a peer results
in the programmed gateway having no routes.
```
3 changes: 3 additions & 0 deletions .changelog/_5669.txt
@@ -0,0 +1,3 @@
```release-note:improvement
audit-logging: **(Enterprise only)** enable error response and request body logging
```
3 changes: 3 additions & 0 deletions .changelog/_5740.txt
@@ -0,0 +1,3 @@
```release-note:feature
api: (Enterprise only) Add `POST /v1/operator/audit-hash` endpoint to calculate the hash of the data used by the audit log hash function and salt.
```
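The changelog entry above only names the new endpoint, so the following is a hedged sketch of how an operator might exercise it against a local agent over plain HTTP. Only the path `/v1/operator/audit-hash` comes from the entry; the JSON body field (`Input`) and the response shape are assumptions for illustration.

```go
// Hypothetical call to the new audit-hash endpoint on a local agent.
// Only the URL path is taken from the changelog; the request body field
// ("Input") and the response shape are assumptions, not shown in this diff.
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(`{"Input": "example-payload"}`)
	resp, err := http.Post("http://127.0.0.1:8500/v1/operator/audit-hash",
		"application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status, string(out))
}
```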
3 changes: 3 additions & 0 deletions .changelog/_5750.txt
@@ -0,0 +1,3 @@
```release-note:feature
cli: (Enterprise only) Add a new `consul operator audit hash` command to retrieve and compare the hash of the data used by the audit log hash function and salt.
```
@@ -1,16 +1,16 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0

name: Nightly Test 1.12.x
name: Nightly Test 1.16.x
on:
schedule:
- cron: '0 4 * * *'
workflow_dispatch: {}

env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "release/1.12.x"
BRANCH_NAME: "release-1.12.x" # Used for naming artifacts
BRANCH: "release/1.16.x"
BRANCH_NAME: "release-1.16.x" # Used for naming artifacts

jobs:
frontend-test-workspace-node:
11 changes: 11 additions & 0 deletions agent/agent.go
@@ -1621,7 +1621,18 @@ func (a *Agent) RPC(ctx context.Context, method string, args interface{}, reply
method = e + "." + p[1]
}
}

// audit log only on consul clients
_, ok := a.delegate.(*consul.Client)
if ok {
a.writeAuditRPCEvent(method, "OperationStart")
}

a.endpointsLock.RUnlock()

defer func() {
a.writeAuditRPCEvent(method, "OperationComplete")
}()
return a.delegate.RPC(ctx, method, args, reply)
}

56 changes: 28 additions & 28 deletions agent/agent_endpoint_test.go
@@ -1822,7 +1822,7 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) {
for i := 1; i < 7; i++ {
contents, err := os.ReadFile(tmpFile)
if err != nil {
t.Fatalf("should be able to read file, but had: %#v", err)
r.Fatalf("should be able to read file, but had: %#v", err)
}
contentsStr = string(contents)
if contentsStr != "" {
@@ -1909,14 +1909,14 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) {
ensureNothingCritical(r, "red-is-dead")

if err := a.reloadConfigInternal(cfg2); err != nil {
t.Fatalf("got error %v want nil", err)
r.Fatalf("got error %v want nil", err)
}

// We check that reload does not go to critical
ensureNothingCritical(r, "red-is-dead")
ensureNothingCritical(r, "testing-agent-reload-001")

require.NoError(t, a.updateTTLCheck(checkID, api.HealthPassing, "testing-agent-reload-002"))
require.NoError(r, a.updateTTLCheck(checkID, api.HealthPassing, "testing-agent-reload-002"))

ensureNothingCritical(r, "red-is-dead")
})
@@ -2926,7 +2926,7 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(nodeCheck))
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusForbidden, resp.Code)
require.Equal(r, http.StatusForbidden, resp.Code)
})
})

@@ -2936,7 +2936,7 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
req.Header.Add("X-Consul-Token", svcToken.SecretID)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusForbidden, resp.Code)
require.Equal(r, http.StatusForbidden, resp.Code)
})
})

@@ -2946,7 +2946,7 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
req.Header.Add("X-Consul-Token", nodeToken.SecretID)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusOK, resp.Code)
require.Equal(r, http.StatusOK, resp.Code)
})
})

@@ -2955,7 +2955,7 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(svcCheck))
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusForbidden, resp.Code)
require.Equal(r, http.StatusForbidden, resp.Code)
})
})

@@ -2965,7 +2965,7 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
req.Header.Add("X-Consul-Token", nodeToken.SecretID)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusForbidden, resp.Code)
require.Equal(r, http.StatusForbidden, resp.Code)
})
})

@@ -2975,7 +2975,7 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
req.Header.Add("X-Consul-Token", svcToken.SecretID)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusOK, resp.Code)
require.Equal(r, http.StatusOK, resp.Code)
})
})
}
@@ -5976,17 +5976,17 @@ func TestAgent_Monitor(t *testing.T) {
res := httptest.NewRecorder()
a.srv.h.ServeHTTP(res, registerReq)
if http.StatusOK != res.Code {
t.Fatalf("expected 200 but got %v", res.Code)
r.Fatalf("expected 200 but got %v", res.Code)
}

// Wait until we have received some type of logging output
require.Eventually(t, func() bool {
require.Eventually(r, func() bool {
return len(resp.Body.Bytes()) > 0
}, 3*time.Second, 100*time.Millisecond)

cancelFunc()
code := <-codeCh
require.Equal(t, http.StatusOK, code)
require.Equal(r, http.StatusOK, code)
got := resp.Body.String()

// Only check a substring that we are highly confident in finding
@@ -6026,11 +6026,11 @@ func TestAgent_Monitor(t *testing.T) {
res := httptest.NewRecorder()
a.srv.h.ServeHTTP(res, registerReq)
if http.StatusOK != res.Code {
t.Fatalf("expected 200 but got %v", res.Code)
r.Fatalf("expected 200 but got %v", res.Code)
}

// Wait until we have received some type of logging output
require.Eventually(t, func() bool {
require.Eventually(r, func() bool {
return len(resp.Body.Bytes()) > 0
}, 3*time.Second, 100*time.Millisecond)
cancelFunc()
@@ -6063,24 +6063,24 @@ func TestAgent_Monitor(t *testing.T) {
res := httptest.NewRecorder()
a.srv.h.ServeHTTP(res, registerReq)
if http.StatusOK != res.Code {
t.Fatalf("expected 200 but got %v", res.Code)
r.Fatalf("expected 200 but got %v", res.Code)
}

// Wait until we have received some type of logging output
require.Eventually(t, func() bool {
require.Eventually(r, func() bool {
return len(resp.Body.Bytes()) > 0
}, 3*time.Second, 100*time.Millisecond)

cancelFunc()
code := <-codeCh
require.Equal(t, http.StatusOK, code)
require.Equal(r, http.StatusOK, code)

// Each line is output as a separate JSON object, we grab the first and
// make sure it can be unmarshalled.
firstLine := bytes.Split(resp.Body.Bytes(), []byte("\n"))[0]
var output map[string]interface{}
if err := json.Unmarshal(firstLine, &output); err != nil {
t.Fatalf("err: %v", err)
r.Fatalf("err: %v", err)
}
})
})
@@ -6672,7 +6672,7 @@ func TestAgentConnectCARoots_list(t *testing.T) {

dec := json.NewDecoder(resp.Body)
value := &structs.IndexedCARoots{}
require.NoError(t, dec.Decode(value))
require.NoError(r, dec.Decode(value))
if ca.ID != value.ActiveRootID {
r.Fatalf("%s != %s", ca.ID, value.ActiveRootID)
}
@@ -7080,7 +7080,7 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) {

dec := json.NewDecoder(resp.Body)
issued2 := &structs.IssuedCert{}
require.NoError(t, dec.Decode(issued2))
require.NoError(r, dec.Decode(issued2))
if issued.CertPEM == issued2.CertPEM {
r.Fatalf("leaf has not updated")
}
@@ -7092,9 +7092,9 @@
}

// Verify that the cert is signed by the new CA
requireLeafValidUnderCA(t, issued2, ca)
requireLeafValidUnderCA(r, issued2, ca)

require.NotEqual(t, issued, issued2)
require.NotEqual(r, issued, issued2)
})
}
}
@@ -7471,11 +7471,11 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) {
// Try and sign again (note no index/wait arg since cache should update in
// background even if we aren't actively blocking)
a2.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusOK, resp.Code)
require.Equal(r, http.StatusOK, resp.Code)

dec := json.NewDecoder(resp.Body)
issued2 := &structs.IssuedCert{}
require.NoError(t, dec.Decode(issued2))
require.NoError(r, dec.Decode(issued2))
if issued.CertPEM == issued2.CertPEM {
r.Fatalf("leaf has not updated")
}
@@ -7487,9 +7487,9 @@
}

// Verify that the cert is signed by the new CA
requireLeafValidUnderCA(t, issued2, dc1_ca2)
requireLeafValidUnderCA(r, issued2, dc1_ca2)

require.NotEqual(t, issued, issued2)
require.NotEqual(r, issued, issued2)
})
}

@@ -7499,12 +7499,12 @@ func waitForActiveCARoot(t *testing.T, srv *HTTPHandlers, expect *structs.CARoot
resp := httptest.NewRecorder()
srv.h.ServeHTTP(resp, req)
if http.StatusOK != resp.Code {
t.Fatalf("expected 200 but got %v", resp.Code)
r.Fatalf("expected 200 but got %v", resp.Code)
}

dec := json.NewDecoder(resp.Body)
roots := &structs.IndexedCARoots{}
require.NoError(t, dec.Decode(roots))
require.NoError(r, dec.Decode(roots))

var root *structs.CARoot
for _, r := range roots.Roots {
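The recurring change in this and the later test files is mechanical: inside `retry.Run`/`retry.RunWith` blocks, assertions and fatal calls now take the `*retry.R` argument (`r`) instead of the outer `*testing.T`, so a transient failure is retried rather than failing the test outright. A minimal sketch of that pattern, assuming the `sdk/testutil/retry` package these tests use:

```go
// Minimal sketch of the retry pattern these edits standardize on: pass the
// *retry.R into require/Fatalf so a failed check is retried instead of
// aborting the whole test. The import path is assumed from the sdk/testutil
// import elsewhere in this diff; checkHealth is a hypothetical stand-in for
// whatever the real tests poll.
package example

import (
	"testing"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
)

func checkHealth() (string, error) { return "passing", nil }

func TestEventuallyPassing(t *testing.T) {
	retry.RunWith(&retry.Timer{Wait: 100 * time.Millisecond, Timeout: 10 * time.Second}, t, func(r *retry.R) {
		status, err := checkHealth()
		require.NoError(r, err) // retried on failure; require.NoError(t, err) would fail immediately
		if status != "passing" {
			r.Fatalf("expected passing, got %q", status)
		}
	})
}
```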
4 changes: 4 additions & 0 deletions agent/agent_oss.go
@@ -69,3 +69,7 @@ func (a *Agent) AgentEnterpriseMeta() *acl.EnterpriseMeta {
func (a *Agent) registerEntCache() {}

func (*Agent) fillEnterpriseProxyDataSources(*proxycfg.DataSources) {}

func (a *Agent) writeAuditRPCEvent(_ string, _ string) interface{} {
return nil
}
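This stub exists so the new `writeAuditRPCEvent` call sites in `agent.go` compile in the open-source build, while the Enterprise build supplies a real implementation. A hedged sketch of that split using Go build tags; the tag name (`consulent`) is an assumption, not shown in this diff:

```go
// Sketch of the oss/enterprise split the stub above relies on: two files in
// package agent declare the same method under complementary build tags, so
// the call sites compile in both builds. The tag name is an assumption.

//go:build !consulent

package agent

// writeAuditRPCEvent does nothing in the open-source build; the Enterprise
// build replaces it with a method that actually emits the audit event.
func (a *Agent) writeAuditRPCEvent(_ string, _ string) interface{} {
	return nil
}
```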
5 changes: 3 additions & 2 deletions agent/config/config.go
@@ -807,8 +807,9 @@ type ConfigEntries struct {

// Audit allows us to enable and define destinations for auditing
type Audit struct {
Enabled *bool `mapstructure:"enabled"`
Sinks map[string]AuditSink `mapstructure:"sink"`
Enabled *bool `mapstructure:"enabled"`
Sinks map[string]AuditSink `mapstructure:"sink"`
RPCEnabled *bool `mapstructure:"rpc_enabled"`
}

// AuditSink can be provided multiple times to define pipelines for auditing
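For context, a small, hedged sketch of how the new `rpc_enabled` key would decode into `RPCEnabled` through the mapstructure tags shown above; the real agent config loading does considerably more than this.

```go
// Standalone sketch: the mapstructure tags above mean an audit config block
// with "enabled" and "rpc_enabled" keys decodes into these fields. This
// bypasses Consul's real config plumbing and is for illustration only.
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type AuditSink struct{}

type Audit struct {
	Enabled    *bool                `mapstructure:"enabled"`
	Sinks      map[string]AuditSink `mapstructure:"sink"`
	RPCEnabled *bool                `mapstructure:"rpc_enabled"`
}

func main() {
	raw := map[string]interface{}{
		"enabled":     true,
		"rpc_enabled": false,
	}
	var audit Audit
	if err := mapstructure.Decode(raw, &audit); err != nil {
		panic(err)
	}
	fmt.Println(*audit.Enabled, *audit.RPCEnabled) // true false
}
```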
4 changes: 2 additions & 2 deletions agent/consul/acl_endpoint_test.go
@@ -143,7 +143,7 @@ func TestACLEndpoint_ReplicationStatus(t *testing.T) {
retry.Run(t, func(r *retry.R) {
var status structs.ACLReplicationStatus
err := msgpackrpc.CallWithCodec(codec, "ACL.ReplicationStatus", &getR, &status)
require.NoError(t, err)
require.NoError(r, err)

require.True(r, status.Enabled)
require.True(r, status.Running)
@@ -220,7 +220,7 @@ func TestACLEndpoint_TokenRead(t *testing.T) {
time.Sleep(200 * time.Millisecond)
err := aclEp.TokenRead(&req, &resp)
require.Error(r, err)
require.ErrorContains(t, err, "ACL not found")
require.ErrorContains(r, err, "ACL not found")
require.Nil(r, resp.Token)
})
})
2 changes: 1 addition & 1 deletion agent/consul/client_test.go
@@ -179,7 +179,7 @@ func TestClient_LANReap(t *testing.T) {
retry.Run(t, func(r *retry.R) {
require.Len(r, c1.LANMembersInAgentPartition(), 1)
server := c1.router.FindLANServer()
require.Nil(t, server)
require.Nil(r, server)
})
}

2 changes: 2 additions & 0 deletions agent/consul/merge_test.go
@@ -12,6 +12,7 @@ import (

"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/consul/version"
)

func TestMerge_LAN(t *testing.T) {
@@ -282,6 +283,7 @@ func makeTestNode(t *testing.T, tm testMember) *serf.Member {
"vsn": "2",
"vsn_max": "3",
"vsn_min": "2",
"fips": version.GetFIPSInfo(),
},
}
if tm.partition != "" {
2 changes: 1 addition & 1 deletion agent/consul/multilimiter/multilimiter_test.go
@@ -95,7 +95,7 @@ func TestRateLimiterCleanup(t *testing.T) {
retry.RunWith(&retry.Timer{Wait: 100 * time.Millisecond, Timeout: 10 * time.Second}, t, func(r *retry.R) {
v, ok := limiters.Get(key)
require.True(r, ok)
require.NotNil(t, v)
require.NotNil(r, v)
})

time.Sleep(c.ReconcileCheckInterval)
15 changes: 6 additions & 9 deletions agent/consul/prepared_query_endpoint_test.go
@@ -1539,17 +1539,15 @@ func TestPreparedQuery_Execute(t *testing.T) {
assert.Len(t, reply.Nodes, 0)
})

expectNodes := func(t *testing.T, query *structs.PreparedQueryRequest, reply *structs.PreparedQueryExecuteResponse, n int) {
t.Helper()
expectNodes := func(t require.TestingT, query *structs.PreparedQueryRequest, reply *structs.PreparedQueryExecuteResponse, n int) {
assert.Len(t, reply.Nodes, n)
assert.Equal(t, "dc1", reply.Datacenter)
assert.Equal(t, 0, reply.Failovers)
assert.Equal(t, query.Query.Service.Service, reply.Service)
assert.Equal(t, query.Query.DNS, reply.DNS)
assert.True(t, reply.QueryMeta.KnownLeader)
}
expectFailoverNodes := func(t *testing.T, query *structs.PreparedQueryRequest, reply *structs.PreparedQueryExecuteResponse, n int) {
t.Helper()
expectFailoverNodes := func(t require.TestingT, query *structs.PreparedQueryRequest, reply *structs.PreparedQueryExecuteResponse, n int) {
assert.Len(t, reply.Nodes, n)
assert.Equal(t, "dc2", reply.Datacenter)
assert.Equal(t, 1, reply.Failovers)
@@ -1558,8 +1556,7 @@ func TestPreparedQuery_Execute(t *testing.T) {
assert.True(t, reply.QueryMeta.KnownLeader)
}

expectFailoverPeerNodes := func(t *testing.T, query *structs.PreparedQueryRequest, reply *structs.PreparedQueryExecuteResponse, n int) {
t.Helper()
expectFailoverPeerNodes := func(t require.TestingT, query *structs.PreparedQueryRequest, reply *structs.PreparedQueryExecuteResponse, n int) {
assert.Len(t, reply.Nodes, n)
assert.Equal(t, "", reply.Datacenter)
assert.Equal(t, es.peeringServer.acceptingPeerName, reply.PeerName)
@@ -2372,13 +2369,13 @@ func TestPreparedQuery_Execute(t *testing.T) {
}

var reply structs.PreparedQueryExecuteResponse
require.NoError(t, msgpackrpc.CallWithCodec(es.server.codec, "PreparedQuery.Execute", &req, &reply))
require.NoError(r, msgpackrpc.CallWithCodec(es.server.codec, "PreparedQuery.Execute", &req, &reply))

for _, node := range reply.Nodes {
assert.NotEqual(t, "node3", node.Node.Node)
assert.NotEqual(r, "node3", node.Node.Node)
}

expectNodes(t, &query, &reply, 9)
expectNodes(r, &query, &reply, 9)
})
})
}
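The `expectNodes` family of helpers above now takes `require.TestingT` instead of `*testing.T`; that interface is satisfied by both `*testing.T` and the `*retry.R` handed out inside retry blocks, which is what lets the final `expectNodes(r, ...)` call work. A minimal sketch of the idea (import path for the retry package assumed as elsewhere in this diff):

```go
// Minimal sketch of why the helpers switch to require.TestingT: the same
// assertion helper can be called with a *testing.T or with the *retry.R
// provided inside retry.Run.
package example

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
)

func expectPositive(t require.TestingT, n int) {
	require.Greater(t, n, 0)
}

func TestHelperWorksInBothContexts(t *testing.T) {
	expectPositive(t, 1) // plain *testing.T caller

	retry.Run(t, func(r *retry.R) {
		expectPositive(r, 2) // *retry.R caller inside a retry loop
	})
}
```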