Enabled errcheck Linter in golangci.yml (#1393)
Signed-off-by: Ritikaa96 <ritika@india.nec.com>
Ritikaa96 authored Dec 7, 2020
1 parent b067f8a commit 7ec8077
Showing 12 changed files with 96 additions and 27 deletions.
2 changes: 1 addition & 1 deletion .golangci.yml
@@ -30,7 +30,7 @@ linters:
 - deadcode
 - depguard
 - dogsled
-#- errcheck
+- errcheck
 #- funlen
 - goconst
 - gocritic
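For context, errcheck flags call sites where a function returns an error that the caller silently discards. The sketch below is illustrative only and is not taken from this commit; writeReport and the report path are hypothetical names used to show the pattern the linter rejects and the explicit check it expects.

package main

import (
	"fmt"
	"os"
)

// writeReport is a hypothetical helper; like most I/O calls it returns an error.
func writeReport(path string, data []byte) error {
	return os.WriteFile(path, data, 0o644)
}

func main() {
	data := []byte("report")

	// errcheck would flag this call, because the returned error is dropped:
	//   writeReport("/tmp/report.txt", data)

	// Checking the error explicitly satisfies the linter.
	if err := writeReport("/tmp/report.txt", data); err != nil {
		fmt.Fprintf(os.Stderr, "unable to write report: %v\n", err)
		os.Exit(1)
	}
}

The diffs below apply this same check-or-handle pattern to every call that previously ignored its returned error.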
4 changes: 3 additions & 1 deletion adapter/main.go
@@ -113,7 +113,9 @@ func main() {
 	cmd.Flags().AddGoFlagSet(flag.CommandLine) // make sure we get the klog flags
 	cmd.Flags().IntVar(&prometheusMetricsPort, "metrics-port", 9022, "Set the port to expose prometheus metrics")
 	cmd.Flags().StringVar(&prometheusMetricsPath, "metrics-path", "/metrics", "Set the path for the prometheus metrics endpoint")
-	cmd.Flags().Parse(os.Args)
+	if err := cmd.Flags().Parse(os.Args); err != nil {
+		return
+	}
 
 	kedaProvider := cmd.makeProviderOrDie()
 	cmd.WithExternalMetrics(kedaProvider)
8 changes: 6 additions & 2 deletions controllers/scaledobject_controller.go
@@ -131,7 +131,9 @@ func (r *ScaledObjectReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error
 	// ensure Status Conditions are initialized
 	if !scaledObject.Status.Conditions.AreInitialized() {
 		conditions := kedav1alpha1.GetInitializedConditions()
-		kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, conditions)
+		if err := kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, conditions); err != nil {
+			return ctrl.Result{}, err
+		}
 	}
 
 	// reconcile ScaledObject and set status appropriately
@@ -145,7 +147,9 @@ func (r *ScaledObjectReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error
 		reqLogger.V(1).Info(msg)
 		conditions.SetReadyCondition(metav1.ConditionTrue, "ScaledObjectReady", msg)
 	}
-	kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, &conditions)
+	if err := kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, &conditions); err != nil {
+		return ctrl.Result{}, err
+	}
 	return ctrl.Result{}, err
 }

21 changes: 17 additions & 4 deletions pkg/metrics/prometheus_metrics.go
@@ -66,13 +66,19 @@ func init() {
 func (metricsServer PrometheusMetricServer) NewServer(address string, pattern string) {
 	http.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
 		w.WriteHeader(http.StatusOK)
-		w.Write([]byte("OK"))
+		_, err := w.Write([]byte("OK"))
+		if err != nil {
+			log.Fatalf("Unable to write to serve custom metrics: %v", err)
+		}
 	})
 	log.Printf("Starting metrics server at %v", address)
 	http.Handle(pattern, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
 
 	// initialize the total error metric
-	scalerErrorsTotal.GetMetricWith(prometheus.Labels{})
+	_, errscaler := scalerErrorsTotal.GetMetricWith(prometheus.Labels{})
+	if errscaler != nil {
+		log.Fatalf("Unable to initialize total error metrics as : %v", errscaler)
+	}
 
 	log.Fatal(http.ListenAndServe(address, nil))
 }
@@ -92,7 +98,10 @@ func (metricsServer PrometheusMetricServer) RecordHPAScalerError(namespace strin
 		return
 	}
 	// initialize metric with 0 if not already set
-	scalerErrorsTotal.GetMetricWith(getLabels(namespace, scaledObject, scaler, scalerIndex, metric))
+	_, errscaler := scalerErrorsTotal.GetMetricWith(getLabels(namespace, scaledObject, scaler, scalerIndex, metric))
+	if errscaler != nil {
+		log.Fatalf("Unable to write to serve custom metrics: %v", errscaler)
+	}
 }
 
 // RecordScalerObjectError counts the number of errors with the scaled object
@@ -103,7 +112,11 @@ func (metricsServer PrometheusMetricServer) RecordScalerObjectError(namespace st
 		return
 	}
 	// initialize metric with 0 if not already set
-	scaledObjectErrors.GetMetricWith(labels)
+	_, errscaledobject := scaledObjectErrors.GetMetricWith(labels)
+	if errscaledobject != nil {
+		log.Fatalf("Unable to write to serve custom metrics: %v", errscaledobject)
+		return
+	}
 }
 
 func getLabels(namespace string, scaledObject string, scaler string, scalerIndex int, metric string) prometheus.Labels {
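A note on the metric initialization above: in the Prometheus Go client, CounterVec.GetMetricWith returns both the counter for a given label set and an error, so touching a series to pre-initialize it at zero yields an error value that errcheck now requires to be handled. The sketch below shows that pattern in isolation; the counter name and label are made up for illustration and are not KEDA's actual metrics.

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Hypothetical counter vector, not the one defined in KEDA.
	requestErrors := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_request_errors_total",
			Help: "Total number of failed example requests.",
		},
		[]string{"endpoint"},
	)
	prometheus.MustRegister(requestErrors)

	// GetMetricWith returns (prometheus.Counter, error); creating the child
	// here makes the series visible at 0 before the first Inc call.
	counter, err := requestErrors.GetMetricWith(prometheus.Labels{"endpoint": "/healthz"})
	if err != nil {
		log.Fatalf("unable to initialize error counter: %v", err)
	}
	counter.Inc() // record one error for demonstration
}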
4 changes: 3 additions & 1 deletion pkg/scalers/artemis_scaler.go
@@ -184,7 +184,9 @@ func (s *artemisScaler) getQueueMessageCount() (int, error) {
 
 	defer resp.Body.Close()
 
-	json.NewDecoder(resp.Body).Decode(&monitoringInfo)
+	if err := json.NewDecoder(resp.Body).Decode(&monitoringInfo); err != nil {
+		return -1, err
+	}
 	if resp.StatusCode == 200 && monitoringInfo.Status == 200 {
 		messageCount = monitoringInfo.MsgCount
 	} else {
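The decode-and-check shape above recurs in several scalers (Artemis here, STAN further down): issue an HTTP request, decode the JSON body, and now propagate the decode error instead of dropping it. A generic, self-contained sketch of the pattern, assuming a hypothetical endpoint URL and payload type rather than the scalers' real ones:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// queueStats is a stand-in for a scaler's monitoring payload.
type queueStats struct {
	MsgCount int `json:"MessageCount"`
}

func fetchQueueStats(url string) (queueStats, error) {
	var stats queueStats

	resp, err := http.Get(url)
	if err != nil {
		return stats, err
	}
	defer resp.Body.Close()

	// Decode returns an error that errcheck requires us to handle.
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		return stats, fmt.Errorf("decoding monitoring response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return stats, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return stats, nil
}

func main() {
	stats, err := fetchQueueStats("http://localhost:8161/console/jolokia/read/example")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("message count:", stats.MsgCount)
}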
5 changes: 4 additions & 1 deletion pkg/scalers/external_scaler.go
@@ -222,7 +222,10 @@ func (s *externalPushScaler) Run(ctx context.Context, active chan<- bool) {
 			externalLog.Error(err, "error running internalRun")
 			return
 		}
-		handleIsActiveStream(ctx, s.scaledObjectRef, grpcClient, active)
+		if err := handleIsActiveStream(ctx, s.scaledObjectRef, grpcClient, active); err != nil {
+			externalLog.Error(err, "error running internalRun")
+			return
+		}
 		done()
 	}

5 changes: 4 additions & 1 deletion pkg/scalers/rabbitmq_scaler_test.go
@@ -118,7 +118,10 @@ func TestGetQueueInfo(t *testing.T) {
 			}
 
 			w.WriteHeader(testData.responseStatus)
-			w.Write([]byte(testData.response))
+			_, err := w.Write([]byte(testData.response))
+			if err != nil {
+				t.Error("Expect request path to =", testData.response, "but it is", err)
+			}
 		}))
 
 		resolvedEnv := map[string]string{host: fmt.Sprintf("%s%s", apiStub.URL, vhostPath)}
11 changes: 8 additions & 3 deletions pkg/scalers/stan_scaler.go
@@ -133,8 +133,10 @@ func (s *stanScaler) IsActive(ctx context.Context) (bool, error) {
 	}
 
 	defer resp.Body.Close()
-	json.NewDecoder(resp.Body).Decode(&s.channelInfo)
-
+	if err := json.NewDecoder(resp.Body).Decode(&s.channelInfo); err != nil {
+		stanLog.Error(err, "Unable to decode channel info as %v", err)
+		return false, err
+	}
 	return s.hasPendingMessage() || s.getMaxMsgLag() > 0, nil
 }

@@ -209,7 +211,10 @@ func (s *stanScaler) GetMetrics(ctx context.Context, metricName string, metricSe
 	}
 
 	defer resp.Body.Close()
-	json.NewDecoder(resp.Body).Decode(&s.channelInfo)
+	if err := json.NewDecoder(resp.Body).Decode(&s.channelInfo); err != nil {
+		stanLog.Error(err, "Unable to decode channel info as %v", err)
+		return []external_metrics.ExternalMetricValue{}, err
+	}
 	totalLag := s.getMaxMsgLag()
 	stanLog.V(1).Info("Stan scaler: Providing metrics based on totalLag, threshold", "totalLag", totalLag, "lagThreshold", s.metadata.lagThreshold)
 	metric := external_metrics.ExternalMetricValue{
5 changes: 4 additions & 1 deletion pkg/scaling/executor/scale_jobs.go
@@ -37,7 +37,10 @@ func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1al
 		logger.V(1).Info("At least one scaler is active")
 		now := metav1.Now()
 		scaledJob.Status.LastActiveTime = &now
-		e.updateLastActiveTime(ctx, logger, scaledJob)
+		err := e.updateLastActiveTime(ctx, logger, scaledJob)
+		if err != nil {
+			logger.Error(err, "Failed to update last active time")
+		}
 		e.createJobs(logger, scaledJob, scaleTo, effectiveMaxScale)
 	} else {
 		logger.V(1).Info("No change in activity")
19 changes: 15 additions & 4 deletions pkg/scaling/executor/scale_jobs_test.go
@@ -39,7 +39,11 @@ func TestCleanUpNormalCase(t *testing.T) {
 
 	scaleExecutor := getMockScaleExecutor(client)
 
-	scaleExecutor.cleanUp(scaledJob)
+	err := scaleExecutor.cleanUp(scaledJob)
+	if err != nil {
+		t.Errorf("Unable to cleanup as: %v", err)
+		return
+	}
 
 	_, ok := actualDeletedJobName["name2"]
 	assert.True(t, ok)
@@ -129,8 +133,11 @@ func TestCleanUpMixedCaseWithSortByTime(t *testing.T) {
 
 	scaleExecutor := getMockScaleExecutor(client)
 
-	scaleExecutor.cleanUp(scaledJob)
-
+	err := scaleExecutor.cleanUp(scaledJob)
+	if err != nil {
+		t.Errorf("Unable to cleanup as: %v", err)
+		return
+	}
 	assert.Equal(t, 3, len(actualDeletedJobName))
 	_, ok := actualDeletedJobName["success2"]
 	assert.True(t, ok)
@@ -167,7 +174,11 @@ func TestCleanUpDefaultValue(t *testing.T) {
 
 	scaleExecutor := getMockScaleExecutor(client)
 
-	scaleExecutor.cleanUp(scaledJob)
+	err := scaleExecutor.cleanUp(scaledJob)
+	if err != nil {
+		t.Errorf("Unable to cleanup as: %v", err)
+		return
+	}
 
 	assert.Equal(t, 2, len(actualDeletedJobName))
 	_, ok := actualDeletedJobName["success0"]
31 changes: 25 additions & 6 deletions pkg/scaling/executor/scale_scaledobjects.go
@@ -52,17 +52,27 @@ func (e *scaleExecutor) RequestScale(ctx context.Context, scaledObject *kedav1al
 	case isActive:
 		// triggers are active, but we didn't need to scale (replica count > 0)
 		// Update LastActiveTime to now.
-		e.updateLastActiveTime(ctx, logger, scaledObject)
+		err := e.updateLastActiveTime(ctx, logger, scaledObject)
+		if err != nil {
+			logger.Error(err, "Error updating last active time")
+			return
+		}
 	default:
 		logger.V(1).Info("ScaleTarget no change")
 	}
 
 	condition := scaledObject.Status.Conditions.GetActiveCondition()
 	if condition.IsUnknown() || condition.IsTrue() != isActive {
 		if isActive {
-			e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionTrue, "ScalerActive", "Scaling is performed because triggers are active")
+			if err := e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionTrue, "ScalerActive", "Scaling is performed because triggers are active"); err != nil {
+				logger.Error(err, "Error setting active condition when triggers are active")
+				return
+			}
 		} else {
-			e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionFalse, "ScalerNotActive", "Scaling is not performed because triggers are not active")
+			if err := e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionFalse, "ScalerNotActive", "Scaling is not performed because triggers are not active"); err != nil {
+				logger.Error(err, "Error setting active condition when triggers are not active")
+				return
+			}
 		}
 	}
 }
@@ -87,7 +97,10 @@ func (e *scaleExecutor) scaleToZero(ctx context.Context, logger logr.Logger, sca
 		err := e.updateScaleOnScaleTarget(ctx, scaledObject, scale)
 		if err == nil {
 			logger.Info("Successfully scaled ScaleTarget to 0 replicas")
-			e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionFalse, "ScalerNotActive", "Scaling is not performed because triggers are not active")
+			if err := e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionFalse, "ScalerNotActive", "Scaling is not performed because triggers are not active"); err != nil {
+				logger.Error(err, "Error in setting active condition")
+				return
+			}
 		}
 	} else {
 		logger.V(1).Info("ScaleTarget cooling down",
@@ -96,7 +109,10 @@
 
 		activeCondition := scaledObject.Status.Conditions.GetActiveCondition()
 		if !activeCondition.IsFalse() || activeCondition.Reason != "ScalerCooldown" {
-			e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionFalse, "ScalerCooldown", "Scaler cooling down because triggers are not active")
+			if err := e.setActiveCondition(ctx, logger, scaledObject, metav1.ConditionFalse, "ScalerCooldown", "Scaler cooling down because triggers are not active"); err != nil {
+				logger.Error(err, "Error in setting active condition")
+				return
+			}
 		}
 	}
 }
@@ -117,7 +133,10 @@ func (e *scaleExecutor) scaleFromZero(ctx context.Context, logger logr.Logger, s
 			"New Replicas Count", scale.Spec.Replicas)
 
 		// Scale was successful. Update lastScaleTime and lastActiveTime on the scaledObject
-		e.updateLastActiveTime(ctx, logger, scaledObject)
+		if err := e.updateLastActiveTime(ctx, logger, scaledObject); err != nil {
+			logger.Error(err, "Error in Updating lastScaleTime and lastActiveTime on the scaledObject")
+			return
+		}
 	}
 }

8 changes: 6 additions & 2 deletions pkg/scaling/resolver/scale_resolvers_test.go
@@ -141,8 +141,12 @@ func TestResolveNonExistingConfigMapsOrSecretsEnv(t *testing.T) {
 }
 
 func TestResolveAuthRef(t *testing.T) {
-	corev1.AddToScheme(scheme.Scheme)
-	kedav1alpha1.AddToScheme(scheme.Scheme)
+	if err := corev1.AddToScheme(scheme.Scheme); err != nil {
+		t.Errorf("Expected Error because: %v", err)
+	}
+	if err := kedav1alpha1.AddToScheme(scheme.Scheme); err != nil {
+		t.Errorf("Expected Error because: %v", err)
+	}
 	tests := []struct {
 		name string
 		existing []runtime.Object
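As with the other hunks, AddToScheme registers a package's API types into a runtime.Scheme and returns an error, so the test now checks it. A minimal sketch of the same registration pattern, using a fresh scheme rather than the package-level scheme.Scheme the test above relies on:

package main

import (
	"log"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// AddToScheme returns an error, which errcheck expects to be checked
	// rather than silently dropped.
	s := runtime.NewScheme()
	if err := corev1.AddToScheme(s); err != nil {
		log.Fatalf("unable to register core/v1 types: %v", err)
	}
	log.Println("core/v1 types registered")
}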
