Merge pull request #2864 from red-hat-storage/sync_ds--main
Syncing latest changes from main for ocs-operator
openshift-merge-bot[bot] authored Oct 19, 2024
2 parents 45a093f + 2a2fdb2 commit e6aa8fe
Showing 4 changed files with 82 additions and 20 deletions.
6 changes: 3 additions & 3 deletions controllers/storagecluster/topology.go
@@ -100,7 +100,7 @@ func setFailureDomain(sc *ocsv1.StorageCluster) {
	// If sufficient zones are available then we select zone as the failure domain
	topologyMap := sc.Status.NodeTopologies
	for label, labelValues := range topologyMap.Labels {
-		if strings.Contains(label, "zone") {
+		if label == corev1.LabelZoneFailureDomainStable || label == labelZoneFailureDomainWithoutBeta {
			if (len(labelValues) >= 2 && arbiterEnabled(sc)) || (len(labelValues) >= 3) {
				failureDomain = "zone"
			}
@@ -135,7 +135,7 @@ func determinePlacementRack(
	targetAZ := ""
	for label, value := range node.Labels {
		for _, key := range validTopologyLabelKeys {
-			if strings.Contains(label, key) && strings.Contains(label, "zone") {
+			if strings.Contains(label, key) && (label == corev1.LabelZoneFailureDomainStable || label == labelZoneFailureDomainWithoutBeta) {
				targetAZ = value
				break
			}
@@ -159,7 +159,7 @@ func determinePlacementRack(
		if n.Name == nodeName {
			for label, value := range n.Labels {
				for _, key := range validTopologyLabelKeys {
-					if strings.Contains(label, key) && strings.Contains(label, "zone") && value == targetAZ {
+					if strings.Contains(label, key) && (label == corev1.LabelZoneFailureDomainStable || label == labelZoneFailureDomainWithoutBeta) && value == targetAZ {
						validRack = true
						break
					}
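Note: the three hunks above replace loose substring matching on "zone" with exact comparisons against the well-known zone topology labels. corev1.LabelZoneFailureDomainStable is "topology.kubernetes.io/zone" in k8s.io/api; labelZoneFailureDomainWithoutBeta is defined elsewhere in this package, so the value used below is an assumption. A minimal runnable sketch of the difference:

```go
package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// Assumed value; the real constant lives elsewhere in the ocs-operator package.
const labelZoneFailureDomainWithoutBeta = "failure-domain.kubernetes.io/zone"

// isZoneLabel mirrors the new exact-match check from the hunks above.
func isZoneLabel(label string) bool {
	return label == corev1.LabelZoneFailureDomainStable ||
		label == labelZoneFailureDomainWithoutBeta
}

func main() {
	// A custom label that merely contains "zone" no longer counts as a zone
	// topology label, which the old strings.Contains check wrongly allowed.
	for _, label := range []string{
		"topology.kubernetes.io/zone",
		"example.com/storage-zone", // hypothetical custom label
	} {
		fmt.Printf("%-28s contains=%v exact=%v\n",
			label, strings.Contains(label, "zone"), isZoneLabel(label))
	}
}
```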
12 changes: 6 additions & 6 deletions hack/install-ocs-operator.sh
@@ -37,13 +37,8 @@ patch_ocs_client_operator_config_configmap() {
    while true; do
        if oc get cm ocs-client-operator-config -n openshift-storage; then
            oc patch cm ocs-client-operator-config -n openshift-storage --type merge -p '{"data":{"DEPLOY_CSI":"false"}}'
-            sleep 10
-            value=$(oc get cm ocs-client-operator-config -n openshift-storage -o custom-columns=:data.DEPLOY_CSI --no-headers)
-            if [[ "$value" == "false" ]]; then
-                break
-            fi
-            sleep 2
        fi
+        sleep 10
    done
}

@@ -56,6 +51,11 @@ patch_ocs_client_operator_config_configmap() {
# configuration and deploying the CSI. This approach allows us to stop the CSI deployment by patching the ConfigMap as soon as it's created.
# We cannot create the ConfigMap early in the process because OLM overwrites it with an empty one later in the cycle.
patch_ocs_client_operator_config_configmap &
+# Get the process ID (PID) of the background process
+bg_pid=$!
+# Trap to kill the process when the script exits
+trap 'kill $bg_pid' EXIT

"$OPERATOR_SDK" run bundle "$OCS_CLIENT_BUNDLE_FULL_IMAGE_NAME" --timeout=10m --security-context-config restricted -n "$INSTALL_NAMESPACE"
"$OPERATOR_SDK" run bundle "$NOOBAA_BUNDLE_FULL_IMAGE_NAME" --timeout=10m --security-context-config restricted -n "$INSTALL_NAMESPACE"
"$OPERATOR_SDK" run bundle "$BUNDLE_FULL_IMAGE_NAME" --timeout=10m --security-context-config restricted -n "$INSTALL_NAMESPACE"
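Note: the loop above no longer breaks once the patch sticks; it re-applies the patch every 10 seconds for as long as the script runs, so the new trap is what stops the background job on exit. As an illustration only (not code from this change), the same supervise-and-cancel shape in Go, with hypothetical names:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// patchLoop stands in for patch_ocs_client_operator_config_configmap:
// it retries forever and relies on the caller to stop it.
func patchLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return // the Go analogue of `trap 'kill $bg_pid' EXIT`
		case <-time.After(10 * time.Second):
			fmt.Println("patch ocs-client-operator-config if it exists")
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // ensures the background loop stops when main returns
	go patchLoop(ctx)

	// ... the operator-sdk bundle installs would run here ...
	time.Sleep(time.Second)
}
```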
75 changes: 64 additions & 11 deletions services/provider/server/server.go
@@ -11,14 +11,15 @@ import (
	"encoding/json"
	"encoding/pem"
	"fmt"
-	"k8s.io/utils/ptr"
	"math"
	"net"
	"slices"
	"strconv"
	"strings"
	"time"

+	"k8s.io/utils/ptr"
+
	"github.com/blang/semver/v4"
	nbv1 "github.com/noobaa/noobaa-operator/v5/pkg/apis/noobaa/v1alpha1"
	quotav1 "github.com/openshift/api/quota/v1"
@@ -193,7 +194,17 @@ func (s *OCSProviderServer) GetStorageConfig(ctx context.Context, req *pb.StorageConfigRequest) (*pb.StorageConfigResponse, error) {
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Failed to construct status response: %v", err)
		}
-		desiredClientConfigHash := getDesiredClientConfigHash(channelName, consumerObj)
+
+		storageCluster, err := s.getStorageCluster(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		desiredClientConfigHash := getDesiredClientConfigHash(
+			channelName,
+			consumerObj,
+			isEncryptionInTransitEnabled(storageCluster.Spec.Network),
+		)

		klog.Infof("successfully returned the config details to the consumer.")
		return &pb.StorageConfigResponse{
@@ -751,15 +762,12 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.StorageClaimConfigRequest) (*pb.StorageClaimConfigResponse, error) {
		"csi.storage.k8s.io/controller-expand-secret-name": provisionerSecretName,
	}

-	storageClusters := &ocsv1.StorageClusterList{}
-	if err := s.client.List(ctx, storageClusters, client.InNamespace(s.namespace), client.Limit(2)); err != nil {
-		return nil, status.Errorf(codes.Internal, "failed to get storage cluster: %v", err)
-	}
-	if len(storageClusters.Items) != 1 {
-		return nil, status.Errorf(codes.Internal, "expecting one single storagecluster to exist")
+	storageCluster, err := s.getStorageCluster(ctx)
+	if err != nil {
+		return nil, err
	}
	var kernelMountOptions map[string]string
-	for _, option := range strings.Split(util.GetCephFSKernelMountOptions(&storageClusters.Items[0]), ",") {
+	for _, option := range strings.Split(util.GetCephFSKernelMountOptions(storageCluster), ",") {
		if kernelMountOptions == nil {
			kernelMountOptions = map[string]string{}
		}
@@ -847,18 +855,28 @@ func (s *OCSProviderServer) ReportStatus(ctx context.Context, req *pb.ReportStatusRequest) (*pb.ReportStatusResponse, error) {
		return nil, status.Errorf(codes.Internal, "Failed to construct status response: %v", err)
	}

-	desiredClientConfigHash := getDesiredClientConfigHash(channelName, storageConsumer)
+	storageCluster, err := s.getStorageCluster(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	desiredClientConfigHash := getDesiredClientConfigHash(
+		channelName,
+		storageConsumer,
+		isEncryptionInTransitEnabled(storageCluster.Spec.Network),
+	)
+
	return &pb.ReportStatusResponse{
		DesiredClientOperatorChannel: channelName,
		DesiredConfigHash:            desiredClientConfigHash,
	}, nil
}

-func getDesiredClientConfigHash(channelName string, storageConsumer *ocsv1alpha1.StorageConsumer) string {
+func getDesiredClientConfigHash(channelName string, storageConsumer *ocsv1alpha1.StorageConsumer, encryptionInTransit bool) string {
	var arr = []any{
		channelName,
		storageConsumer.Spec.StorageQuotaInGiB,
+		encryptionInTransit,
	}
	return util.CalculateMD5Hash(arr)
}
@@ -878,6 +896,41 @@ func (s *OCSProviderServer) getOCSSubscriptionChannel(ctx context.Context) (string, error) {
	return subscription.Spec.Channel, nil
}

+func (s *OCSProviderServer) getStorageCluster(ctx context.Context) (*ocsv1.StorageCluster, error) {
+	scList := &ocsv1.StorageClusterList{}
+	if err := s.client.List(ctx, scList, client.InNamespace(s.namespace)); err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to list storage clusters: %v", err)
+	}
+
+	var foundSc *ocsv1.StorageCluster
+	for i := range scList.Items {
+		sc := &scList.Items[i]
+		if sc.Status.Phase == util.PhaseIgnored {
+			continue // Skip Ignored storage cluster
+		}
+		if sc.Spec.AllowRemoteStorageConsumers {
+			if foundSc != nil {
+				// This means we have already found one storage cluster, so this is a second one
+				return nil, status.Errorf(codes.FailedPrecondition, "multiple provider storage clusters found")
+			}
+			foundSc = sc
+		}
+	}
+
+	if foundSc == nil {
+		return nil, status.Errorf(codes.NotFound, "no provider storage cluster found")
+	}
+
+	return foundSc, nil
+}
+
+func isEncryptionInTransitEnabled(networkSpec *rookCephv1.NetworkSpec) bool {
+	return networkSpec != nil &&
+		networkSpec.Connections != nil &&
+		networkSpec.Connections.Encryption != nil &&
+		networkSpec.Connections.Encryption.Enabled
+}
+
func extractMonitorIps(data string) ([]string, error) {
	var ips []string
	mons := strings.Split(data, ",")
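Note: folding encryption-in-transit into the desired-config hash means toggling it on the StorageCluster changes the hash returned by GetStorageConfig and ReportStatus, which is what nudges connected clients to resync. A self-contained sketch of that effect; calculateMD5Hash below is an assumed stand-in (JSON-encode, then MD5), not necessarily what util.CalculateMD5Hash does:

```go
package main

import (
	"crypto/md5"
	"encoding/json"
	"fmt"
)

// Assumed stand-in for util.CalculateMD5Hash: serialize, then MD5.
func calculateMD5Hash(value any) string {
	data, _ := json.Marshal(value)
	return fmt.Sprintf("%x", md5.Sum(data))
}

// desiredHash mirrors getDesiredClientConfigHash with the new third input.
func desiredHash(channel string, quotaGiB int, encryptionInTransit bool) string {
	return calculateMD5Hash([]any{channel, quotaGiB, encryptionInTransit})
}

func main() {
	// Same channel and quota, different encryption setting: the hashes
	// differ, so clients see a config change and pick it up.
	fmt.Println(desiredHash("stable-4.17", 100, false))
	fmt.Println(desiredHash("stable-4.17", 100, true))
}
```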
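Note: the new getStorageCluster helper encodes three rules: clusters in the Ignored phase are skipped, exactly one remaining cluster may set AllowRemoteStorageConsumers, and zero matches is NotFound rather than Internal. A dependency-free restatement of that selection logic with stand-in types:

```go
package main

import (
	"errors"
	"fmt"
)

// cluster is a stand-in for ocsv1.StorageCluster in this sketch.
type cluster struct {
	name        string
	phase       string
	allowRemote bool
}

func pickProvider(clusters []cluster) (*cluster, error) {
	var found *cluster
	for i := range clusters {
		c := &clusters[i]
		if c.phase == "Ignored" { // util.PhaseIgnored in the real code
			continue
		}
		if !c.allowRemote {
			continue
		}
		if found != nil {
			return nil, errors.New("multiple provider storage clusters found")
		}
		found = c
	}
	if found == nil {
		return nil, errors.New("no provider storage cluster found")
	}
	return found, nil
}

func main() {
	provider, err := pickProvider([]cluster{
		{name: "mirror", phase: "Ignored", allowRemote: true}, // skipped
		{name: "internal", phase: "Ready", allowRemote: false},
		{name: "provider", phase: "Ready", allowRemote: true},
	})
	fmt.Println(provider.name, err) // provider <nil>
}
```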
9 changes: 9 additions & 0 deletions services/provider/server/server_test.go
@@ -275,6 +275,15 @@ func TestGetExternalResources(t *testing.T) {
	ocsSubscription.Spec = ocsSubscriptionSpec
	assert.NoError(t, client.Create(ctx, ocsSubscription))

+	storageCluster := &ocsv1.StorageCluster{
+		Spec: ocsv1.StorageClusterSpec{
+			AllowRemoteStorageConsumers: true,
+		},
+	}
+	storageCluster.Name = "test-storagecluster"
+	storageCluster.Namespace = serverNamespace
+	assert.NoError(t, client.Create(ctx, storageCluster))
+
	// When ocsv1alpha1.StorageConsumerStateReady
	req := pb.StorageConfigRequest{
		StorageConsumerUUID: string(consumerResource.UID),
