fix: add a lock to avoid duplicate IP assignments when creating KKInstances concurrently #1670

Merged 2 commits on Dec 21, 2022
1 change: 1 addition & 0 deletions .github/workflows/kubernetes-auto-support.yaml
@@ -26,6 +26,7 @@ jobs:
wget https://attack-on-titan.gd2.qingstor.com/qsctl/v2.4.3/qsctl_v2.4.3_linux_amd64.tar.gz
tar -zxvf qsctl_v2.4.3_linux_amd64.tar.gz
mv qsctl_v2.4.3_linux_amd64 /usr/local/bin/qsctl
rm -rf qsctl_v2.4.3_linux_amd64.tar.gz

- name: update components.json
id: get_new_version
1 change: 1 addition & 0 deletions .github/workflows/release.yaml
@@ -37,6 +37,7 @@ jobs:
rm -rf qsctl_v2.4.3_linux_amd64.tar.gz
wget https://attack-on-titan.gd2.qingstor.com/qsctl/v2.4.3/qsctl_v2.4.3_linux_amd64.tar.gz
tar -zxvf qsctl_v2.4.3_linux_amd64.tar.gz
rm -rf qsctl_v2.4.3_linux_amd64.tar.gz
mv qsctl_v2.4.3_linux_amd64 /usr/local/bin/qsctl
echo "access_key_id: ${{secrets.KS_QSCTL_ACCESS_KEY_ID}}" > qsctl-config.yaml
echo "secret_access_key: ${{ secrets.KS_QSCTL_SECRET_ACCESS_KEY }}" >> qsctl-config.yaml
1 change: 1 addition & 0 deletions controllers/kkinstance/kkinstance_controller.go
@@ -267,6 +267,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re

instanceScope, err := scope.NewInstanceScope(scope.InstanceScopeParams{
Client: r.Client,
Logger: &log,
Cluster: cluster,
Machine: machine,
InfraCluster: infraCluster,
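The Logger: &log parameter added above (and again in the KKMachine controller below) hands the per-reconcile logger to the scope object, which is what lets later code call instanceScope.Info or machineScope.Info directly instead of re-deriving a logger from the context. The scope package itself is not touched by this diff, so the following is only a minimal sketch of how such a params struct could carry the logger; apart from the field names visible in the call above, the *logr.Logger type and the nil fallback are assumptions.

package scope

import (
	"github.com/go-logr/logr"
	ctrl "sigs.k8s.io/controller-runtime"
)

// InstanceScopeParams mirrors the fields seen in the NewInstanceScope call in
// the diff; only Logger matters for this sketch, the rest are omitted.
type InstanceScopeParams struct {
	Logger *logr.Logger
	// Client, Cluster, Machine, InfraCluster omitted here.
}

// InstanceScope embeds the resolved logger so instanceScope.Info(...) and
// instanceScope.V(4).Info(...) work without further plumbing.
type InstanceScope struct {
	logr.Logger
}

// NewInstanceScope copies the caller's logger into the scope, falling back to
// the process-wide logger when none is supplied (assumed behaviour).
func NewInstanceScope(params InstanceScopeParams) (*InstanceScope, error) {
	if params.Logger == nil {
		l := ctrl.Log
		params.Logger = &l
	}
	return &InstanceScope{Logger: *params.Logger}, nil
}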
4 changes: 1 addition & 3 deletions controllers/kkmachine/helpers.go
@@ -28,7 +28,6 @@ import (
capierrors "sigs.k8s.io/cluster-api/errors"
capiutil "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

infrav1 "github.com/kubesphere/kubekey/v3/api/v1beta1"
@@ -37,8 +36,7 @@ import (

func (r *Reconciler) createInstance(ctx context.Context, machineScope *scope.MachineScope,
kkInstanceScope scope.KKInstanceScope) (*infrav1.KKInstance, error) {
log := ctrl.LoggerFrom(ctx)
log.V(4).Info("Creating KKInstance")
machineScope.Info("Creating KKInstance")

if machineScope.Machine.Spec.Version == nil {
err := errors.New("Machine's spec.version must be defined")
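As a side note to the hunk above, createInstance now logs through the machine scope rather than pulling a logger out of the context, and the message moves from verbosity level 4 to the default info level. A small self-contained illustration of the two styles follows; the machineScope stand-in is hypothetical and simply embeds logr.Logger, mirroring how machineScope.Info is used in the diff.

package example

import (
	"context"

	"github.com/go-logr/logr"
	ctrl "sigs.k8s.io/controller-runtime"
)

// machineScope is a hypothetical stand-in for scope.MachineScope; embedding
// logr.Logger is enough to reproduce the call shape used in helpers.go.
type machineScope struct {
	logr.Logger
}

func logOldStyle(ctx context.Context) {
	// Before: derive the logger from the request context at each call site.
	log := ctrl.LoggerFrom(ctx)
	log.V(4).Info("Creating KKInstance")
}

func logNewStyle(scope *machineScope) {
	// After: the scope already carries the reconcile logger (seeded via the
	// Logger field shown in the controller diffs), so no ctrl import is needed.
	scope.Info("Creating KKInstance")
}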
10 changes: 9 additions & 1 deletion controllers/kkmachine/kkmachine_controller.go
@@ -19,6 +19,7 @@ package kkmachine
import (
"context"
"fmt"
"sync"
"time"

"github.com/go-logr/logr"
@@ -62,6 +63,7 @@
// Reconciler reconciles a KKMachine object
type Reconciler struct {
client.Client
mutex sync.Mutex
Scheme *runtime.Scheme
Recorder record.EventRecorder
Tracker *remote.ClusterCacheTracker
@@ -163,6 +165,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
// Create the machine scope
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Client: r.Client,
Logger: &log,
Cluster: cluster,
Machine: machine,
InfraCluster: infraCluster,
Expand Down Expand Up @@ -291,7 +294,12 @@ func (r *Reconciler) reconcileNormal(ctx context.Context, machineScope *scope.Ma
}
}

if !r.mutex.TryLock() {
machineScope.V(4).Info("Waiting for the last KKInstance to be created")
return ctrl.Result{RequeueAfter: 2 * time.Second}, nil
}
instance, err = r.createInstance(ctx, machineScope, kkInstanceScope)
r.mutex.Unlock()
if err != nil {
machineScope.Error(err, "unable to create kkInstance")
r.Recorder.Eventf(machineScope.KKMachine, corev1.EventTypeWarning, "FailedCreate", "Failed to create kkInstance: %v", err)
@@ -379,7 +387,7 @@ func (r *Reconciler) findInstance(ctx context.Context, machineScope *scope.Machi

machineScope.V(4).Info("KKMachine has an instance id", "instance-id", pid.ID())
// If the ProviderID is populated, describe the instance using the ID.
id := pointer.StringPtr(pid.ID())
id := pointer.String(pid.ID())

obj := client.ObjectKey{
Namespace: machineScope.KKMachine.Namespace,
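The heart of the fix sits in the reconcileNormal hunk above: when several KKMachines are reconciled in parallel, their createInstance calls could previously race and hand the same free IP to two different KKInstances, so a controller-wide sync.Mutex now serializes that path. TryLock is used instead of Lock so a losing reconcile does not block its worker goroutine; it just requeues after two seconds and retries. Below is a self-contained sketch of that pattern, with controller-runtime and scope types replaced by placeholders and the IP assignment reduced to a comment, since that logic is not shown in this diff.

package main

import (
	"fmt"
	"sync"
	"time"
)

// reconciler mimics the KKMachine Reconciler from the diff: one shared mutex
// guards the instance-creation path so only a single reconcile runs it at a time.
type reconciler struct {
	mutex sync.Mutex
}

// result stands in for ctrl.Result; only the requeue delay matters here.
type result struct {
	requeueAfter time.Duration
}

func (r *reconciler) reconcileNormal(machine string) result {
	if !r.mutex.TryLock() {
		// Another reconcile is creating a KKInstance right now. Rather than
		// blocking this worker, requeue and retry shortly, as the diff does
		// with ctrl.Result{RequeueAfter: 2 * time.Second}.
		fmt.Printf("%s: waiting for the last KKInstance to be created\n", machine)
		return result{requeueAfter: 2 * time.Second}
	}
	defer r.mutex.Unlock() // the real code calls Unlock right after createInstance returns

	// Serialized section: in the controller this is createInstance, which is
	// where (per the PR title) a free IP gets picked for the new KKInstance.
	fmt.Printf("%s: creating KKInstance\n", machine)
	time.Sleep(50 * time.Millisecond) // stand-in for the creation work
	return result{}
}

func main() {
	r := &reconciler{}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if res := r.reconcileNormal(fmt.Sprintf("kkmachine-%d", i)); res.requeueAfter > 0 {
				fmt.Printf("kkmachine-%d: requeued, will retry in %s\n", i, res.requeueAfter)
			}
		}(i)
	}
	wg.Wait()
}

The trade-off is that instance creation is serialized across everything the controller manages, and a contended reconcile pays up to two extra seconds of latency, which the PR evidently accepts in exchange for a simple, race-free creation path.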
Binary file removed qsctl_v2.4.3_linux_amd64.tar.gz
2 changes: 1 addition & 1 deletion test/e2e/config/e2e_conf.yaml
@@ -93,7 +93,7 @@ variables:
KUBERNETES_VERSION_MANAGEMENT: "v1.24.0"
KUBERNETES_VERSION: "v1.24.0"
IMAGE_REPOSITORY: "k8s.gcr.io"
CNI: "./data/cni/calico.yaml"
CNI: "../../data/cni/calico.yaml"
EVENT_BRIDGE_INSTANCE_STATE: "true"
EXP_CLUSTER_RESOURCE_SET: "true"
IP_FAMILY: "IPv4"