add kubernetes credentials to job pods (#3912)
* add kubernetes credentials to job pods

Signed-off-by: Min Min <jamsman94@gmail.com>

* fix local cluster logic

Signed-off-by: Min Min <jamsman94@gmail.com>

* rebase main

Signed-off-by: Min Min <jamsman94@gmail.com>

* add dockerfile and makefile

Signed-off-by: Min Min <jamsman94@gmail.com>

* add default value for preset

Signed-off-by: Min Min <jamsman94@gmail.com>

---------

Signed-off-by: Min Min <jamsman94@gmail.com>
jamsman94 authored Dec 31, 2024
1 parent 53bdae6 commit 932fda7
Showing 10 changed files with 126 additions and 37 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -37,7 +37,7 @@ debugtools: prereq $(DEBUG_TOOLS_TARGETS:=.push)
%.dev:
@docker buildx build -t ${MAKE_IMAGE_TAG} --platform linux/amd64 -f docker/$*.Dockerfile --push .

%.buildbase: MAKE_IMAGE_TAG ?= ${IMAGE_REPOSITORY}/build-base:$*
%.buildbase: MAKE_IMAGE_TAG ?= ${IMAGE_REPOSITORY}/build-base:$*-with-kubectl
%.buildbase:
@docker buildx build -t ${MAKE_IMAGE_TAG} --no-cache --platform linux/amd64,linux/arm64 -f docker/$*-base.Dockerfile --push .

11 changes: 10 additions & 1 deletion docker/bionic-base.Dockerfile
@@ -29,7 +29,16 @@ RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
wget -qO - https://package.perforce.com/perforce.pubkey | gpg --dearmor | sudo tee /usr/share/keyrings/perforce.gpg && \
echo deb [signed-by=/usr/share/keyrings/perforce.gpg] https://package.perforce.com/apt/ubuntu bionic release > /etc/apt/sources.list.d/perforce.list && \
apt-get update && apt-get install -y helix-p4d; \
apt-get update && apt-get install -y helix-p4d && \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
chmod +x kubectl && \
mv kubectl /usr/local/bin; \
fi

RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && \
chmod +x kubectl && \
mv kubectl /usr/local/bin; \
fi

# install docker client
11 changes: 10 additions & 1 deletion docker/focal-base.Dockerfile
@@ -27,7 +27,16 @@ RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
wget -qO - https://package.perforce.com/perforce.pubkey | gpg --dearmor | sudo tee /usr/share/keyrings/perforce.gpg && \
echo deb [signed-by=/usr/share/keyrings/perforce.gpg] https://package.perforce.com/apt/ubuntu focal release > /etc/apt/sources.list.d/perforce.list && \
apt-get update && apt-get install -y helix-p4d; \
apt-get update && apt-get install -y helix-p4d && \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
chmod +x kubectl && \
mv kubectl /usr/local/bin; \
fi

RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && \
chmod +x kubectl && \
mv kubectl /usr/local/bin; \
fi

# install docker client
5 changes: 5 additions & 0 deletions pkg/microservice/aslan/config/consts.go
@@ -675,3 +675,8 @@ const (
DistributeImageMethodImagePush DistributeImageMethod = "image_push"
DistributeImageMethodCloudSync DistributeImageMethod = "cloud_sync"
)

const (
AgentTypeZadigDefaultServiceAccountName = "koderover-agent"
KubeConfigTypeZadigDefaultServiceAccountName = "zadig-workflow-sa"
)
7 changes: 4 additions & 3 deletions pkg/microservice/aslan/core/common/repository/models/build.go
@@ -88,9 +88,10 @@ type PreBuild struct {
// Parameters
Parameters []*Parameter `bson:"parameters,omitempty" json:"parameters"`
// UploadPkg uploads package to s3
UploadPkg bool `bson:"upload_pkg" json:"upload_pkg"`
ClusterID string `bson:"cluster_id" json:"cluster_id"`
StrategyID string `bson:"strategy_id" json:"strategy_id"`
UploadPkg bool `bson:"upload_pkg" json:"upload_pkg"`
ClusterID string `bson:"cluster_id" json:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source"`
StrategyID string `bson:"strategy_id" json:"strategy_id"`
// UseHostDockerDaemon determines whether the Docker daemon on the host node is used in the pod
UseHostDockerDaemon bool `bson:"use_host_docker_daemon" json:"use_host_docker_daemon"`

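The new cluster_source field is carried in both BSON and JSON alongside cluster_id. As a quick illustration of what API consumers will see, here is a minimal, self-contained Go sketch (the trimmed struct mirrors only the PreBuild tags touched by this commit, and the field values are invented for the example):

package main

import (
	"encoding/json"
	"fmt"
)

// preBuildSubset is a stand-in that keeps only the PreBuild fields relevant
// to this commit; the real model carries many more fields.
type preBuildSubset struct {
	ClusterID     string `json:"cluster_id"`
	ClusterSource string `json:"cluster_source"`
	StrategyID    string `json:"strategy_id"`
}

func main() {
	b, _ := json.MarshalIndent(preBuildSubset{
		ClusterID:     "0123456789abcdef", // illustrative value
		ClusterSource: "fixed",            // same default this commit applies to freestyle presets
		StrategyID:    "default",          // illustrative value
	}, "", "  ")
	fmt.Println(string(b))
	// Prints:
	// {
	//   "cluster_id": "0123456789abcdef",
	//   "cluster_source": "fixed",
	//   "strategy_id": "default"
	// }
}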
23 changes: 12 additions & 11 deletions pkg/microservice/aslan/core/common/repository/models/scanning.go
@@ -61,18 +61,19 @@ func (Scanning) TableName() string {
}

type ScanningAdvancedSetting struct {
ArtifactPaths []string `bson:"artifact_paths" json:"artifact_paths"`
ClusterID string `bson:"cluster_id" json:"cluster_id"`
StrategyID string `bson:"strategy_id" json:"strategy_id"`
Timeout int64 `bson:"timeout" json:"timeout"`
ResReq setting.Request `bson:"res_req" json:"res_req"`
ResReqSpec setting.RequestSpec `bson:"res_req_spec" json:"res_req_spec"`
HookCtl *ScanningHookCtl `bson:"hook_ctl" json:"hook_ctl"`
NotifyCtls []*NotifyCtl `bson:"notify_ctls" json:"notify_ctls"`
Cache *ScanningCacheSetting `bson:"cache" json:"cache"`
ConcurrencyLimit int `bson:"concurrency_limit" json:"concurrency_limit"`
ArtifactPaths []string `bson:"artifact_paths" json:"artifact_paths"`
ClusterID string `bson:"cluster_id" json:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source"`
StrategyID string `bson:"strategy_id" json:"strategy_id"`
Timeout int64 `bson:"timeout" json:"timeout"`
ResReq setting.Request `bson:"res_req" json:"res_req"`
ResReqSpec setting.RequestSpec `bson:"res_req_spec" json:"res_req_spec"`
HookCtl *ScanningHookCtl `bson:"hook_ctl" json:"hook_ctl"`
NotifyCtls []*NotifyCtl `bson:"notify_ctls" json:"notify_ctls"`
Cache *ScanningCacheSetting `bson:"cache" json:"cache"`
ConcurrencyLimit int `bson:"concurrency_limit" json:"concurrency_limit"`
CustomAnnotations []*util.KeyValue `bson:"custom_annotations" json:"custom_annotations"`
CustomLabels []*util.KeyValue `bson:"custom_labels" json:"custom_labels" `
CustomLabels []*util.KeyValue `bson:"custom_labels" json:"custom_labels"`
}

type ScanningHookCtl struct {
@@ -104,6 +104,7 @@ type PreTest struct {
// EnableProxy
EnableProxy bool `bson:"enable_proxy" json:"enable_proxy"`
ClusterID string `bson:"cluster_id" json:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source"`
StrategyID string `bson:"strategy_id" json:"strategy_id"`
ConcurrencyLimit int `bson:"concurrency_limit" json:"concurrency_limit"`
// TODO: Deprecated.
@@ -276,6 +276,7 @@ func (s *WorkflowServiceModule) GetKey() string {
type CustomDeployJobSpec struct {
Namespace string `bson:"namespace" json:"namespace" yaml:"namespace"`
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source" yaml:"cluster_source"`
DockerRegistryID string `bson:"docker_registry_id" json:"docker_registry_id" yaml:"docker_registry_id"`
SkipCheckRunStatus bool `bson:"skip_check_run_status" json:"skip_check_run_status" yaml:"skip_check_run_status"`
// support two sources, runtime/fixed.
@@ -496,6 +497,7 @@ type ZadigDistributeImageJobSpec struct {
// unit is minute.
Timeout int64 `bson:"timeout" json:"timeout" yaml:"timeout"`
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source" yaml:"cluster_source"`
StrategyID string `bson:"strategy_id" json:"strategy_id" yaml:"strategy_id"`
EnableTargetImageTagRule bool `bson:"enable_target_image_tag_rule" json:"enable_target_image_tag_rule" yaml:"enable_target_image_tag_rule"`
TargetImageTagRule string `bson:"target_image_tag_rule" json:"target_image_tag_rule" yaml:"target_image_tag_rule"`
@@ -660,6 +662,7 @@ type BlueGreenTarget struct {

type CanaryDeployJobSpec struct {
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source" yaml:"cluster_source"`
Namespace string `bson:"namespace" json:"namespace" yaml:"namespace"`
DockerRegistryID string `bson:"docker_registry_id" json:"docker_registry_id" yaml:"docker_registry_id"`
Targets []*CanaryTarget `bson:"targets" json:"targets" yaml:"targets"`
@@ -685,6 +688,7 @@ type CanaryTarget struct {

type GrayReleaseJobSpec struct {
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source" yaml:"cluster_source"`
Namespace string `bson:"namespace" json:"namespace" yaml:"namespace"`
DockerRegistryID string `bson:"docker_registry_id" json:"docker_registry_id" yaml:"docker_registry_id"`
FromJob string `bson:"from_job" json:"from_job" yaml:"from_job"`
@@ -705,6 +709,7 @@ type GrayReleaseTarget struct {

type K8sPatchJobSpec struct {
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source" yaml:"cluster_source"`
Namespace string `bson:"namespace" json:"namespace" yaml:"namespace"`
PatchItems []*PatchItem `bson:"patch_items" json:"patch_items" yaml:"patch_items"`
PatchItemOptions []*PatchItem `bson:"-" json:"patch_item_options" yaml:"patch_item_options"`
@@ -760,8 +765,9 @@ type PatchItem struct {
}

type GrayRollbackJobSpec struct {
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
Namespace string `bson:"namespace" json:"namespace" yaml:"namespace"`
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source" yaml:"cluster_source"`
Namespace string `bson:"namespace" json:"namespace" yaml:"namespace"`
// unit is minute.
RollbackTimeout int64 `bson:"rollback_timeout" json:"rollback_timeout" yaml:"rollback_timeout"`
Targets []*GrayRollbackTarget `bson:"targets" json:"targets" yaml:"targets"`
@@ -794,6 +800,7 @@ type JiraJobSpec struct {
type IstioJobSpec struct {
First bool `bson:"first" json:"first" yaml:"first"`
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source" yaml:"cluster_source"`
FromJob string `bson:"from_job" json:"from_job" yaml:"from_job"`
RegistryID string `bson:"registry_id" json:"registry_id" yaml:"registry_id"`
Namespace string `bson:"namespace" json:"namespace" yaml:"namespace"`
@@ -805,11 +812,12 @@ type IstioJobSpec struct {
}

type IstioRollBackJobSpec struct {
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
Namespace string `bson:"namespace" json:"namespace" yaml:"namespace"`
Timeout int64 `bson:"timeout" json:"timeout" yaml:"timeout"`
Targets []*IstioJobTarget `bson:"targets" json:"targets" yaml:"targets"`
TargetOptions []*IstioJobTarget `bson:"-" json:"target_options" yaml:"target_options"`
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source" yaml:"cluster_source"`
Namespace string `bson:"namespace" json:"namespace" yaml:"namespace"`
Timeout int64 `bson:"timeout" json:"timeout" yaml:"timeout"`
Targets []*IstioJobTarget `bson:"targets" json:"targets" yaml:"targets"`
TargetOptions []*IstioJobTarget `bson:"-" json:"target_options" yaml:"target_options"`
}

type UpdateEnvIstioConfigJobSpec struct {
@@ -1266,6 +1274,7 @@ type JobProperties struct {
Infrastructure string `bson:"infrastructure" json:"infrastructure" yaml:"infrastructure"`
VMLabels []string `bson:"vm_labels" json:"vm_labels" yaml:"vm_labels"`
ClusterID string `bson:"cluster_id" json:"cluster_id" yaml:"cluster_id"`
ClusterSource string `bson:"cluster_source" json:"cluster_source" yaml:"cluster_source"`
StrategyID string `bson:"strategy_id" json:"strategy_id" yaml:"strategy_id"`
BuildOS string `bson:"build_os" json:"build_os" yaml:"build_os,omitempty"`
ImageFrom string `bson:"image_from" json:"image_from" yaml:"image_from,omitempty"`
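Every job spec in the file above gains the same cluster_source field with matching bson, json, and yaml tags. For workflow YAML authors, a minimal sketch of how the new key serializes (the struct is a trimmed stand-in for one of the specs, and gopkg.in/yaml.v3 is used here purely for demonstration):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// canaryDeploySubset is a trimmed stand-in for CanaryDeployJobSpec, keeping
// only the fields relevant to this commit.
type canaryDeploySubset struct {
	ClusterID     string `yaml:"cluster_id"`
	ClusterSource string `yaml:"cluster_source"`
	Namespace     string `yaml:"namespace"`
}

func main() {
	out, _ := yaml.Marshal(canaryDeploySubset{
		ClusterID:     "0123456789abcdef", // illustrative value
		ClusterSource: "fixed",
		Namespace:     "default",
	})
	fmt.Print(string(out))
	// Prints:
	// cluster_id: 0123456789abcdef
	// cluster_source: fixed
	// namespace: default
}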
@@ -65,16 +65,18 @@ import (
)

const (
BusyBoxImage = "koderover.tencentcloudcr.com/koderover-public/busybox:latest"
ZadigContextDir = "/zadig/"
ZadigLogFile = ZadigContextDir + "zadig.log"
ZadigLifeCycleFile = ZadigContextDir + "lifecycle"
ExecutorResourceVolumeName = "executor-resource"
ExecutorVolumePath = "/executor"
JobExecutorFile = ExecutorVolumePath + "/jobexecutor"
defaultSecretEmail = "bot@koderover.com"
registrySecretSuffix = "-registry-secret"
workflowConfigMapRoleSA = "workflow-cm-sa"
BusyBoxImage = "koderover.tencentcloudcr.com/koderover-public/busybox:latest"
ZadigContextDir = "/zadig/"
ZadigLogFile = ZadigContextDir + "zadig.log"
ZadigLifeCycleFile = ZadigContextDir + "lifecycle"
ExecutorResourceVolumeName = "executor-resource"
ExecutorKubeConfigVolume = "executor-kubeconfig"
ExecutorVolumePath = "/executor"
ExecutorKubeConfigVolumePath = "/root/.kube"
JobExecutorFile = ExecutorVolumePath + "/jobexecutor"
defaultSecretEmail = "bot@koderover.com"
registrySecretSuffix = "-registry-secret"
workflowConfigMapRoleSA = "workflow-cm-sa"

defaultRetryCount = 3
defaultRetryInterval = time.Second * 3
@@ -361,6 +363,38 @@ func buildJob(jobType, jobImage, jobName, clusterID, currentNamespace string, re
return nil, err
}

var commands []string
var serviceAccountName string

if targetCluster.Type == setting.AgentClusterType {
commands = []string{"/bin/sh", "-c", fmt.Sprintf("cp /app/* %s", ExecutorVolumePath)}

if clusterID != setting.LocalClusterID {
serviceAccountName = config.AgentTypeZadigDefaultServiceAccountName
} else {
controllerRuntimeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID)
if err != nil {
return nil, fmt.Errorf("failed to create client for target cluster %s, err: %s", clusterID, err)
}
deploy, deployExists, err := getter.GetDeployment(config.Namespace(), "aslan", controllerRuntimeClient)
if err != nil || !deployExists {
return nil, fmt.Errorf("failed to find aslan deployment to determine the correct service account, error: %s", err)
}

serviceAccountName = deploy.Spec.Template.Spec.ServiceAccountName
}

} else {
commands = []string{"/bin/sh", "-c", fmt.Sprintf(
`cp /app/* %s &&
cat <<EOF > /root/.kube/config
%s
EOF`,
ExecutorVolumePath,
targetCluster.KubeConfig)}
serviceAccountName = workflowConfigMapRoleSA
}

job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
Expand All @@ -383,7 +417,7 @@ func buildJob(jobType, jobImage, jobName, clusterID, currentNamespace string, re
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
ImagePullSecrets: ImagePullSecrets,
ServiceAccountName: workflowConfigMapRoleSA,
ServiceAccountName: serviceAccountName,
InitContainers: []corev1.Container{
{
ImagePullPolicy: corev1.PullIfNotPresent,
@@ -394,8 +428,12 @@ func buildJob(jobType, jobImage, jobName, clusterID, currentNamespace string, re
Name: ExecutorResourceVolumeName,
MountPath: ExecutorVolumePath,
},
{
Name: ExecutorKubeConfigVolume,
MountPath: ExecutorKubeConfigVolumePath,
},
},
Command: []string{"/bin/sh", "-c", fmt.Sprintf("cp /app/* %s", ExecutorVolumePath)},
Command: commands,
},
},
Containers: []corev1.Container{
@@ -612,6 +650,10 @@ func getVolumeMounts(configMapMountDir string, userHostDockerDaemon bool) []core
Name: ExecutorResourceVolumeName,
MountPath: ExecutorVolumePath,
})
resp = append(resp, corev1.VolumeMount{
Name: ExecutorKubeConfigVolume,
MountPath: ExecutorKubeConfigVolumePath,
})
if userHostDockerDaemon {
resp = append(resp, corev1.VolumeMount{
Name: "docker-sock",
@@ -645,6 +687,12 @@ func getVolumes(jobName string, userHostDockerDaemon bool) []corev1.Volume {
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
})
resp = append(resp, corev1.Volume{
Name: ExecutorKubeConfigVolume,
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
})

if userHostDockerDaemon {
resp = append(resp, corev1.Volume{
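To summarize the job-pod builder change above: the init container no longer just copies the executor binaries, and the service account is no longer hard-coded to workflow-cm-sa. For agent-type clusters the pod runs under the koderover-agent service account (or, on the local cluster, the service account of the aslan deployment), while for other, kubeconfig-based clusters the init container additionally writes the stored kubeconfig into the pod and workflow-cm-sa is kept. A condensed, self-contained sketch of just the command-construction step (constant values copied from the diff; client lookups, error handling, and the full pod spec are elided):

package main

import "fmt"

const ExecutorVolumePath = "/executor"

// initContainerCommand mirrors the branch added in buildJob: agent-type
// clusters only copy the executor binaries, while kubeconfig-based clusters
// additionally write the cluster's kubeconfig to /root/.kube/config via a heredoc.
func initContainerCommand(isAgentCluster bool, kubeConfig string) []string {
	if isAgentCluster {
		return []string{"/bin/sh", "-c", fmt.Sprintf("cp /app/* %s", ExecutorVolumePath)}
	}
	return []string{"/bin/sh", "-c", fmt.Sprintf(
		`cp /app/* %s &&
cat <<EOF > /root/.kube/config
%s
EOF`,
		ExecutorVolumePath,
		kubeConfig)}
}

func main() {
	// Stub kubeconfig; in the real code this comes from targetCluster.KubeConfig.
	cmd := initContainerCommand(false, "apiVersion: v1\nkind: Config")
	fmt.Println(cmd[2])
}

Writing the kubeconfig into an emptyDir mounted at /root/.kube is why the new ExecutorKubeConfigVolume volume and mount appear in getVolumes and getVolumeMounts above, and why the base images now ship kubectl.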
@@ -113,12 +113,18 @@ func (j *FreeStyleJob) SetPreset() error {
if err := commonmodels.IToi(j.job.Spec, j.spec); err != nil {
return err
}
j.job.Spec = j.spec

if j.spec.Source == config.SourceFromJob {
j.spec.OriginJobName = j.spec.JobName
} else if j.spec.Source == config.SourceRuntime {
//
}

if j.spec.Properties.ClusterSource == "" {
j.spec.Properties.ClusterSource = "fixed"
}

j.job.Spec = j.spec
return nil
}

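The preset default added above means a freestyle job that never specifies a cluster source defaults to the "fixed" cluster source. A trivial sketch of the same defaulting pattern in isolation (the struct is a stand-in, not the real job spec type):

package main

import "fmt"

// jobProperties is a stand-in for the real properties struct; only the field
// touched by this commit is shown.
type jobProperties struct {
	ClusterSource string
}

// applyPresetDefault mirrors the check in FreeStyleJob.SetPreset: an empty
// cluster source falls back to "fixed".
func applyPresetDefault(p *jobProperties) {
	if p.ClusterSource == "" {
		p.ClusterSource = "fixed"
	}
}

func main() {
	p := &jobProperties{}
	applyPresetDefault(p)
	fmt.Println(p.ClusterSource) // fixed
}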
