Also probe aggregated API ports in new ciphers test
Co-authored-by: Joshua Casey <joshuatcasey@gmail.com>
cfryanr and joshuatcasey committed Jun 14, 2024
1 parent 75ff3ef commit 1f8ac0f
Showing 7 changed files with 209 additions and 108 deletions.
35 changes: 27 additions & 8 deletions test/integration/limited_ciphers_fips_test.go
@@ -10,20 +10,39 @@ import (
"testing"
)

// TestLimitedCiphers_Disruptive will confirm that the Pinniped Supervisor exposes only those ciphers listed in
// configuration.
// This does not test the Concierge (which has the same feature) since the Concierge does not have exposed API
// endpoints with the Default profile.
// TestLimitedCiphersFIPS_Disruptive will confirm that the Pinniped Supervisor and Concierge expose only those
// ciphers listed in configuration, when compiled in FIPS mode.
// This does not test the CLI, since it does not have a feature to limit cipher suites.
func TestLimitedCiphersFIPS_Disruptive(t *testing.T) {
performLimitedCiphersTest(t,
// The user-configured ciphers for both the Supervisor and Concierge.
// This is a subset of the hardcoded ciphers from profiles_fips_strict.go.
[]string{
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_256_GCM_SHA384", // this is an insecure cipher but allowed for FIPS
},
[]uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
})
// Expected server configuration for the Supervisor's OIDC endpoints.
&tls.Config{
MinVersion: tls.VersionTLS12, // Supervisor OIDC always allows TLS 1.2 clients to connect
MaxVersion: tls.VersionTLS12, // boringcrypto does not use TLS 1.3 yet
CipherSuites: []uint16{
// Supervisor OIDC endpoints configured with EC certs use only EC ciphers.
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
},
},
// Expected server configuration for the Supervisor and Concierge aggregated API endpoints.
&tls.Config{
MinVersion: tls.VersionTLS12, // boringcrypto does not use TLS 1.3 yet
MaxVersion: tls.VersionTLS12, // boringcrypto does not use TLS 1.3 yet
CipherSuites: []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
},
},
)
}
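
For readers who want to see what "expose only those ciphers listed in configuration" means on the wire, here is a minimal Go sketch (not part of this commit) that probes a server by attempting one TLS 1.2 handshake per candidate suite and reporting which ones succeed. The address 127.0.0.1:10509 is a placeholder for a locally port-forwarded Supervisor service; the real test shells out to nmap via testlib.RunNmapSSLEnum instead.

package main

import (
    "crypto/tls"
    "fmt"
)

// probeTLS12Suites attempts one TLS 1.2 handshake per candidate suite against addr
// and returns the names of the suites the server was willing to negotiate.
func probeTLS12Suites(addr string, candidates []uint16) []string {
    var accepted []string
    for _, suite := range candidates {
        conn, err := tls.Dial("tcp", addr, &tls.Config{
            MinVersion:         tls.VersionTLS12,
            MaxVersion:         tls.VersionTLS12, // pin TLS 1.2 so the CipherSuites list is honored
            CipherSuites:       []uint16{suite},  // offer exactly one suite per handshake
            InsecureSkipVerify: true,             // probing cipher support only, not validating certs
        })
        if err != nil {
            continue // handshake rejected: the server does not accept this suite
        }
        accepted = append(accepted, tls.CipherSuiteName(conn.ConnectionState().CipherSuite))
        _ = conn.Close()
    }
    return accepted
}

func main() {
    fmt.Println(probeTLS12Suites("127.0.0.1:10509", []uint16{
        tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
        tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
    }))
}
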
29 changes: 21 additions & 8 deletions test/integration/limited_ciphers_test.go
@@ -10,21 +10,34 @@ import (
"testing"
)

// TestLimitedCiphersNotFIPS_Disruptive will confirm that the Pinniped Supervisor exposes only those ciphers listed in
// configuration.
// This does not test the Concierge (which has the same feature) since the Concierge does not have exposed API
// endpoints with the Default profile.
// TestLimitedCiphersNotFIPS_Disruptive will confirm that the Pinniped Supervisor and Concierge expose only those
// ciphers listed in configuration, when compiled in non-FIPS mode.
// This does not test the CLI, since it does not have a feature to limit cipher suites.
func TestLimitedCiphersNotFIPS_Disruptive(t *testing.T) {
performLimitedCiphersTest(t,
// The user-configured ciphers for both the Supervisor and Concierge.
// This is a subset of the hardcoded ciphers from profiles.go.
[]string{
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
},
[]uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
})
// Expected server configuration for the Supervisor's OIDC endpoints.
&tls.Config{
MinVersion: tls.VersionTLS12, // Supervisor OIDC always allows TLS 1.2 clients to connect
MaxVersion: tls.VersionTLS13,
CipherSuites: []uint16{
// Supervisor OIDC endpoints configured with EC certs use only EC ciphers.
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
},
},
// Expected server configuration for the Supervisor and Concierge aggregated API endpoints.
&tls.Config{
MinVersion: tls.VersionTLS13, // do not allow TLS 1.2 clients to connect
MaxVersion: tls.VersionTLS13,
CipherSuites: nil, // TLS 1.3 ciphers are not configurable
},
)
}
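
The nil CipherSuites in the aggregated-API expectation above reflects a crypto/tls rule rather than a Pinniped decision: Go does not let callers restrict TLS 1.3 cipher suites. The standalone sketch below (a throwaway httptest server, nothing from this repository) shows the field being ignored once TLS 1.3 is negotiated.

package main

import (
    "crypto/tls"
    "fmt"
    "net/http"
    "net/http/httptest"
)

func main() {
    // Start a local HTTPS server pinned to TLS 1.3. The CipherSuites field below
    // names only a TLS 1.2 suite, but crypto/tls ignores it for TLS 1.3 connections.
    server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
        w.WriteHeader(http.StatusOK)
    }))
    server.TLS = &tls.Config{
        MinVersion:   tls.VersionTLS13,
        CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256},
    }
    server.StartTLS()
    defer server.Close()

    resp, err := server.Client().Get(server.URL)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // Prints a TLS 1.3 suite such as TLS_AES_128_GCM_SHA256, chosen by the runtime.
    fmt.Println(tls.CipherSuiteName(resp.TLS.CipherSuite))
}
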
208 changes: 132 additions & 76 deletions test/integration/limited_ciphers_utils_test.go
@@ -6,78 +6,170 @@ package integration
import (
"context"
"crypto/tls"
"strconv"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"

"go.pinniped.dev/internal/config/concierge"
"go.pinniped.dev/internal/config/supervisor"
"go.pinniped.dev/test/testlib"
)

func performLimitedCiphersTest(t *testing.T, allowedCiphers []string, expectedCiphers []uint16) {
env := testOnKindWithPodShutdown(t)
type stringEditorFunc func(t *testing.T, in string) string

func performLimitedCiphersTest(
t *testing.T,
allowedCiphersConfig []string,
expectedConfigForSupervisorOIDCEndpoints *tls.Config,
expectedConfigForAggregatedAPIEndpoints *tls.Config,
) {
env := testEnvForPodShutdownTests(t)

client := testlib.NewKubernetesClientset(t)
configMapClient := client.CoreV1().ConfigMaps(env.SupervisorNamespace)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
t.Cleanup(cancel)

staticConfigMapName := env.SupervisorAppName + "-static-config"
supervisorStaticConfigMap, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
require.NoError(t, err)
editSupervisorAllowedCiphersConfig := func(t *testing.T, configMapData string) string {
t.Helper()

originalSupervisorConfig := supervisorStaticConfigMap.Data["pinniped.yaml"]
require.NotEmpty(t, originalSupervisorConfig)
var config supervisor.Config
err := yaml.Unmarshal([]byte(configMapData), &config)
require.NoError(t, err)

t.Cleanup(func() {
supervisorStaticConfigMapCleanup, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
require.Empty(t, config.TLS.OneDotTwo.AllowedCiphers) // precondition

config.TLS.OneDotTwo.AllowedCiphers = allowedCiphersConfig

updatedConfig, err := yaml.Marshal(config)
require.NoError(t, err)
return string(updatedConfig)
}

supervisorStaticConfigMapCleanup.Data = make(map[string]string)
supervisorStaticConfigMapCleanup.Data["pinniped.yaml"] = originalSupervisorConfig
editConciergeAllowedCiphersConfig := func(t *testing.T, configMapData string) string {
t.Helper()

_, err = configMapClient.Update(ctx, supervisorStaticConfigMapCleanup, metav1.UpdateOptions{})
var config concierge.Config
err := yaml.Unmarshal([]byte(configMapData), &config)
require.NoError(t, err)

// this will cycle all the pods
restartAllPodsOfApp(t, env.SupervisorNamespace, env.SupervisorAppName, false)
})
require.Empty(t, config.TLS.OneDotTwo.AllowedCiphers) // precondition

var config supervisor.Config
err = yaml.Unmarshal([]byte(originalSupervisorConfig), &config)
require.NoError(t, err)
config.TLS.OneDotTwo.AllowedCiphers = allowedCiphersConfig

// As a precondition of this test, ensure that the list of allowedCiphers is empty
require.Empty(t, config.TLS.OneDotTwo.AllowedCiphers)
updatedConfig, err := yaml.Marshal(config)
require.NoError(t, err)
return string(updatedConfig)
}

config.TLS.OneDotTwo.AllowedCiphers = allowedCiphers
// Update Supervisor's allowed ciphers in its static configmap and restart pods.
updateStaticConfigMapAndRestartApp(t,
ctx,
env.SupervisorNamespace,
env.SupervisorAppName+"-static-config",
env.SupervisorAppName,
false,
editSupervisorAllowedCiphersConfig,
)

// Update Concierge's allowed ciphers in its static configmap and restart pods.
updateStaticConfigMapAndRestartApp(t,
ctx,
env.ConciergeNamespace,
env.ConciergeAppName+"-config",
env.ConciergeAppName,
true,
editConciergeAllowedCiphersConfig,
)

// Probe TLS config of Supervisor's OIDC endpoints.
expectTLSConfigForServicePort(t, ctx,
env.SupervisorAppName+"-nodeport", env.SupervisorNamespace, "10509",
expectedConfigForSupervisorOIDCEndpoints,
)

// Probe TLS config of Supervisor's aggregated endpoints.
expectTLSConfigForServicePort(t, ctx,
env.SupervisorAppName+"-api", env.SupervisorNamespace, "10510",
expectedConfigForAggregatedAPIEndpoints,
)

// Probe TLS config of Concierge's aggregated endpoints.
expectTLSConfigForServicePort(t, ctx,
env.ConciergeAppName+"-api", env.ConciergeNamespace, "10511",
expectedConfigForAggregatedAPIEndpoints,
)
}

updatedSupervisorConfig, err := yaml.Marshal(config)
func expectTLSConfigForServicePort(
t *testing.T,
ctx context.Context,
serviceName string,
serviceNamespace string,
localPortAsStr string,
expectedConfig *tls.Config,
) {
portAsInt, err := strconv.Atoi(localPortAsStr)
require.NoError(t, err)
portAsUint := uint16(portAsInt) // okay to cast because it will only be legal port numbers

startKubectlPortForward(ctx, t, localPortAsStr, "443", serviceName, serviceNamespace)

stdout, stderr := testlib.RunNmapSSLEnum(t, "127.0.0.1", portAsUint)
require.Empty(t, stderr)

expectedNMapOutput := testlib.GetExpectedCiphers(expectedConfig, "server")
assert.Contains(t,
stdout,
expectedNMapOutput,
"actual nmap output:\n%s", stdout,
"but was expected to contain:\n%s", expectedNMapOutput,
)
}

supervisorStaticConfigMap.Data = make(map[string]string)
supervisorStaticConfigMap.Data["pinniped.yaml"] = string(updatedSupervisorConfig)
func updateStaticConfigMapAndRestartApp(
t *testing.T,
ctx context.Context,
namespace string,
staticConfigMapName string,
appName string,
isConcierge bool,
editConfigMapFunc stringEditorFunc,
) {
configMapClient := testlib.NewKubernetesClientset(t).CoreV1().ConfigMaps(namespace)

_, err = configMapClient.Update(ctx, supervisorStaticConfigMap, metav1.UpdateOptions{})
staticConfigMap, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
require.NoError(t, err)

// this will cycle all the pods
restartAllPodsOfApp(t, env.SupervisorNamespace, env.SupervisorAppName, false)
originalConfig := staticConfigMap.Data["pinniped.yaml"]
require.NotEmpty(t, originalConfig)

startKubectlPortForward(ctx, t, "10509", "443", env.SupervisorAppName+"-nodeport", env.SupervisorNamespace)
stdout, stderr := testlib.RunNmapSSLEnum(t, "127.0.0.1", 10509)
require.Empty(t, stderr)
t.Cleanup(func() {
cleanupCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
t.Cleanup(cancel)

expectedCiphersConfig := &tls.Config{
MinVersion: tls.VersionTLS12,
MaxVersion: testlib.MaxTLSVersion,
CipherSuites: expectedCiphers,
}
staticConfigMapForCleanup, err := configMapClient.Get(cleanupCtx, staticConfigMapName, metav1.GetOptions{})
require.NoError(t, err)

staticConfigMapForCleanup.Data = make(map[string]string)
staticConfigMapForCleanup.Data["pinniped.yaml"] = originalConfig

_, err = configMapClient.Update(cleanupCtx, staticConfigMapForCleanup, metav1.UpdateOptions{})
require.NoError(t, err)

restartAllPodsOfApp(t, namespace, appName, isConcierge)
})

require.Contains(t, stdout, testlib.GetExpectedCiphers(expectedCiphersConfig, "server"), "stdout:\n%s", stdout)
staticConfigMap.Data = make(map[string]string)
staticConfigMap.Data["pinniped.yaml"] = editConfigMapFunc(t, originalConfig)

_, err = configMapClient.Update(ctx, staticConfigMap, metav1.UpdateOptions{})
require.NoError(t, err)

restartAllPodsOfApp(t, namespace, appName, isConcierge)
}

// restartAllPodsOfApp will immediately scale to 0 and then scale back.
@@ -101,6 +193,7 @@ func restartAllPodsOfApp(

// Scale down the deployment's number of replicas to 0, which will shut down all the pods.
originalScale := updateDeploymentScale(t, namespace, appName, 0)
require.Greater(t, originalScale, 0)

testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
newPods := getRunningPodsByNamePrefix(t, namespace, appName+"-", ignorePodsWithNameSubstring)
@@ -113,43 +206,6 @@ func restartAllPodsOfApp(
testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
newPods := getRunningPodsByNamePrefix(t, namespace, appName+"-", ignorePodsWithNameSubstring)
requireEventually.Len(newPods, originalScale, "wanted %d pods", originalScale)
requireEventually.True(allPodsReady(newPods), "wanted all new pods to be ready")
}, 2*time.Minute, 200*time.Millisecond)
}
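
For context, scaling a Deployment to zero and back through the scale subresource looks roughly like the sketch below. This is only an assumption about what a helper such as updateDeploymentScale might do; the actual helpers used above (updateDeploymentScale, getRunningPodsByNamePrefix, allPodsReady) are defined elsewhere in the test suite and are not part of this diff.

package sketch

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// scaleDeployment sets the replica count of a Deployment via the scale subresource
// and returns the previous count so the caller can restore it later.
func scaleDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string, replicas int32) (int32, error) {
    scale, err := client.AppsV1().Deployments(namespace).GetScale(ctx, name, metav1.GetOptions{})
    if err != nil {
        return 0, err
    }
    previous := scale.Spec.Replicas
    scale.Spec.Replicas = replicas
    _, err = client.AppsV1().Deployments(namespace).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
    return previous, err
}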

// TestRemoveAllowedCiphersFromStaticConfig_Disruptive updates the Supervisor's static configuration to make sure that the allowed ciphers list is empty.
// It will restart the Supervisor pods. Skipped because it's only here for local testing purposes.
func TestRemoveAllowedCiphersFromStaticConfig_Disruptive(t *testing.T) {
t.Skip()

env := testOnKindWithPodShutdown(t)

client := testlib.NewKubernetesClientset(t)
configMapClient := client.CoreV1().ConfigMaps(env.SupervisorNamespace)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()

staticConfigMapName := env.SupervisorAppName + "-static-config"
supervisorStaticConfigMap, err := configMapClient.Get(ctx, staticConfigMapName, metav1.GetOptions{})
require.NoError(t, err)

originalSupervisorConfig := supervisorStaticConfigMap.Data["pinniped.yaml"]
require.NotEmpty(t, originalSupervisorConfig)

var config supervisor.Config
err = yaml.Unmarshal([]byte(originalSupervisorConfig), &config)
require.NoError(t, err)

config.TLS.OneDotTwo.AllowedCiphers = nil

updatedConfigBytes, err := yaml.Marshal(config)
require.NoError(t, err)

supervisorStaticConfigMap.Data = make(map[string]string)
supervisorStaticConfigMap.Data["pinniped.yaml"] = string(updatedConfigBytes)

_, err = configMapClient.Update(ctx, supervisorStaticConfigMap, metav1.UpdateOptions{})
require.NoError(t, err)

// this will cycle all the pods
restartAllPodsOfApp(t, env.SupervisorNamespace, env.SupervisorAppName, false)
}
