Skip to content

Commit

Permalink
Merge pull request kubernetes#55053 from xiangpengzhao/version-check-…
Browse files Browse the repository at this point in the history
…auth

Automatic merge from submit-queue (batch tested with PRs 55063, 54523, 55053). If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>.

Don't need to check version for auth e2e test

**What this PR does / why we need it**:
In the 1.9 cycle, some e2e tests no longer need to run against such old server versions.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
ref: kubernetes#55050

**Special notes for your reviewer**:
/cc @tallclair @liggitt

**Release note**:

```release-note
NONE
```
  • Loading branch information
Kubernetes Submit Queue authored Nov 3, 2017
2 parents 85877a5 + 32675e6 commit 92952cf
Show file tree
Hide file tree
Showing 9 changed files with 88 additions and 258 deletions.
1 change: 0 additions & 1 deletion test/e2e/auth/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ go_library(
"//pkg/security/apparmor:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/security/podsecuritypolicy/util:go_default_library",
"//pkg/util/version:go_default_library",
"//plugin/pkg/admission/serviceaccount:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
Expand Down
32 changes: 10 additions & 22 deletions test/e2e/auth/service_accounts.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
Expand All @@ -36,10 +35,6 @@ import (

var mountImage = imageutils.GetE2EImage(imageutils.Mounttest)

var serviceAccountTokenNamespaceVersion = utilversion.MustParseSemantic("v1.2.0")

var serviceAccountTokenAutomountVersion = utilversion.MustParseSemantic("v1.6.0-alpha.2")

var _ = SIGDescribe("ServiceAccounts", func() {
f := framework.NewDefaultFramework("svcaccounts")

Expand Down Expand Up @@ -220,16 +215,13 @@ var _ = SIGDescribe("ServiceAccounts", func() {
},
}

supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.ClientSet.Discovery())
if supportsTokenNamespace {
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: "namespace-test",
Image: mountImage,
Args: []string{
fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey),
},
})
}
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: "namespace-test",
Image: mountImage,
Args: []string{
fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey),
},
})

f.TestContainerOutput("consume service account token", pod, 0, []string{
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey, tokenContent),
Expand All @@ -238,16 +230,12 @@ var _ = SIGDescribe("ServiceAccounts", func() {
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey, rootCAContent),
})

if supportsTokenNamespace {
f.TestContainerOutput("consume service account namespace", pod, 2, []string{
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey, f.Namespace.Name),
})
}
f.TestContainerOutput("consume service account namespace", pod, 2, []string{
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey, f.Namespace.Name),
})
})

framework.ConformanceIt("should allow opting out of API token automount ", func() {
framework.SkipUnlessServerVersionGTE(serviceAccountTokenAutomountVersion, f.ClientSet.Discovery())

var err error
trueValue := true
falseValue := false
Expand Down
62 changes: 15 additions & 47 deletions test/e2e/framework/kubelet_stats.go
Original file line number Diff line number Diff line change
Expand Up @@ -280,33 +280,17 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration

// getStatsSummary contacts kubelet for the container information.
func getStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}

ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()

var data []byte
if subResourceProxyAvailable {
data, err = c.CoreV1().RESTClient().Get().
Context(ctx).
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/summary").
Do().Raw()

} else {
data, err = c.CoreV1().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Resource("nodes").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/summary").
Do().Raw()
}
data, err := c.CoreV1().RESTClient().Get().
Context(ctx).
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/summary").
Do().Raw()

if err != nil {
return nil, err
}
Expand Down Expand Up @@ -406,30 +390,14 @@ func getOneTimeResourceUsageOnNode(
}

func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}
data, err := c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/summary").
SetHeader("Content-Type", "application/json").
Do().Raw()

var data []byte
if subResourceProxyAvailable {
data, err = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/summary").
SetHeader("Content-Type", "application/json").
Do().Raw()

} else {
data, err = c.CoreV1().RESTClient().Get().
Prefix("proxy").
Resource("nodes").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/summary").
SetHeader("Content-Type", "application/json").
Do().Raw()
}
if err != nil {
return nil, err
}
Expand Down
32 changes: 8 additions & 24 deletions test/e2e/framework/metrics_util.go
Original file line number Diff line number Diff line change
Expand Up @@ -443,11 +443,6 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
ExpectNoError(err)

subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, c.Discovery())
if err != nil {
return nil, err
}

var data string
var masterRegistered = false
for _, node := range nodes.Items {
Expand All @@ -460,25 +455,14 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
defer cancel()

var rawData []byte
if subResourceProxyAvailable {
rawData, err = c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(metav1.NamespaceSystem).
Resource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
SubResource("proxy").
Suffix("metrics").
Do().Raw()
} else {
rawData, err = c.CoreV1().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Namespace(metav1.NamespaceSystem).
SubResource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
Suffix("metrics").
Do().Raw()
}
rawData, err = c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(metav1.NamespaceSystem).
Resource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
SubResource("proxy").
Suffix("metrics").
Do().Raw()

ExpectNoError(err)
data = string(rawData)
Expand Down
76 changes: 16 additions & 60 deletions test/e2e/framework/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -244,24 +244,8 @@ func GetPauseImageNameForHostArch() string {
return currentPodInfraContainerImageName + "-" + goruntime.GOARCH + ":" + currentPodInfraContainerImageVersion
}

// SubResource proxy should have been functional in v1.0.0, but SubResource
// proxy via tunneling is known to be broken in v1.0. See
// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
// in v1.3).
var SubResourcePodProxyVersion = utilversion.MustParseSemantic("v1.1.0")
var SubResourceServiceAndNodeProxyVersion = utilversion.MustParseSemantic("v1.2.0")

func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}
if subResourceProxyAvailable {
return request.Resource("services").SubResource("proxy"), nil
}
return request.Prefix("proxy").Resource("services"), nil
return request.Resource("services").SubResource("proxy"), nil
}

// unique identifier of the e2e run
Expand Down Expand Up @@ -1672,34 +1656,19 @@ func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c.Discovery())
if err != nil {
return false, err
}

ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()

var body []byte
if subResourceProxyAvailable {
body, err = r.c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do().
Raw()
} else {
body, err = r.c.CoreV1().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Namespace(r.ns).
Resource("pods").
Name(string(pod.Name)).
Do().
Raw()
}
body, err := r.c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do().
Raw()

if err != nil {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
Expand Down Expand Up @@ -4404,29 +4373,16 @@ const proxyTimeout = 2 * time.Minute
func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return restclient.Result{}, err
}
var result restclient.Result
finished := make(chan struct{})
go func() {
if subResourceProxyAvailable {
result = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
result = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()

} else {
result = c.CoreV1().RESTClient().Get().
Prefix("proxy").
Resource("nodes").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
}
finished <- struct{}{}
}()
select {
Expand Down
45 changes: 12 additions & 33 deletions test/e2e/instrumentation/monitoring/influxdb.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,42 +66,21 @@ var (

// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}

ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()

var result []byte
if subResourceProxyAvailable {
result, err = c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace("kube-system").
Resource("services").
Name(influxdbService+":api").
SubResource("proxy").
Suffix("query").
Param("q", query).
Param("db", influxdbDatabaseName).
Param("epoch", "s").
Do().
Raw()
} else {
result, err = c.CoreV1().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Namespace("kube-system").
Resource("services").
Name(influxdbService+":api").
Suffix("query").
Param("q", query).
Param("db", influxdbDatabaseName).
Param("epoch", "s").
Do().
Raw()
}
result, err := c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace("kube-system").
Resource("services").
Name(influxdbService+":api").
SubResource("proxy").
Suffix("query").
Param("q", query).
Param("db", influxdbDatabaseName).
Param("epoch", "s").
Do().
Raw()

if err != nil {
if ctx.Err() != nil {
Expand Down
Loading

0 comments on commit 92952cf

Please sign in to comment.