From 94a96b8eaafd18c5b3bec1c211647eab5358367f Mon Sep 17 00:00:00 2001 From: Tom Wieczorek Date: Wed, 13 Nov 2024 15:38:45 +0100 Subject: [PATCH] Enable unparam linter ... and fix lints on the way. Signed-off-by: Tom Wieczorek --- .golangci.yml | 1 + internal/pkg/file/atomic_test.go | 6 ++- inttest/bind-address/bind_address_test.go | 6 +-- pkg/applier/manager_test.go | 46 +++++++------------ pkg/autopilot/controller/plans/init.go | 8 ++-- .../controller/updates/periodicupdater.go | 5 +- pkg/autopilot/controller/updates/updater.go | 2 +- pkg/certificate/manager.go | 4 +- .../workerconfig/reconciler_test.go | 26 +++++------ pkg/component/prober/prober.go | 4 +- pkg/kubernetes/watch/watcher_test.go | 4 +- 11 files changed, 50 insertions(+), 62 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 5e18653cfca3..86adcd71b26a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -20,6 +20,7 @@ linters: - revive # Stricter drop-in replacement for golint - testifylint # Checks usage of github.com/stretchr/testify - unconvert # Checks for unnecessary type conversions + - unparam # Checks for unused function parameters linters-settings: depguard: diff --git a/internal/pkg/file/atomic_test.go b/internal/pkg/file/atomic_test.go index db33d9aac5b2..a303ff3adc40 100644 --- a/internal/pkg/file/atomic_test.go +++ b/internal/pkg/file/atomic_test.go @@ -84,10 +84,12 @@ func TestWriteAtomically(t *testing.T) { return } - assertDirEmpty := func(t *testing.T, dir string) bool { + assertDirEmpty := func(t *testing.T, dir string) { t.Helper() entries, err := os.ReadDir(dir) - return assert.NoError(t, err) && assert.Empty(t, entries) + if assert.NoError(t, err) { + assert.Empty(t, entries) + } } t.Run("writeFails", func(t *testing.T) { diff --git a/inttest/bind-address/bind_address_test.go b/inttest/bind-address/bind_address_test.go index 6c1dffa98cc6..0bd48b1d6d5d 100644 --- a/inttest/bind-address/bind_address_test.go +++ b/inttest/bind-address/bind_address_test.go @@ -100,7 +100,7 @@ func (s *suite) TestCustomizedBindAddress() { } s.Require().NoError(eg.Wait()) - s.Require().NoError(s.checkClusterReadiness(ctx, clients, 1)) + s.Require().NoError(s.checkClusterReadiness(ctx, clients)) }) s.Run("join_new_controllers", func() { @@ -117,11 +117,11 @@ func (s *suite) TestCustomizedBindAddress() { s.Require().NoError(err) s.T().Logf("Checking if HA cluster is ready") - s.Require().NoError(s.checkClusterReadiness(ctx, clients, s.ControllerCount)) + s.Require().NoError(s.checkClusterReadiness(ctx, clients)) }) } -func (s *suite) checkClusterReadiness(ctx context.Context, clients *kubernetes.Clientset, numControllers int, degradedControllers ...string) error { +func (s *suite) checkClusterReadiness(ctx context.Context, clients *kubernetes.Clientset) error { eg, ctx := errgroup.WithContext(ctx) eg.Go(func() error { diff --git a/pkg/applier/manager_test.go b/pkg/applier/manager_test.go index 0b24871906a5..eeaf1ea82401 100644 --- a/pkg/applier/manager_test.go +++ b/pkg/applier/manager_test.go @@ -39,7 +39,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" yaml "sigs.k8s.io/yaml/goyaml.v2" @@ -216,13 +215,12 @@ func TestManager(t *testing.T) { writeLabel(t, filepath.Join(dir, "stack1/pod.yaml"), "custom1", "test") t.Log("waiting for pod to be updated") - waitFor(t, 100*time.Millisecond, 5*time.Second, func(ctx context.Context) (bool, error) { + require.EventuallyWithT(t, func(t 
*assert.CollectT) {
 		r, err := getResource(fakes, *podgv, "kube-system", "applier-test")
-		if err != nil {
-			return false, nil
+		if assert.NoError(t, err) {
+			assert.Equal(t, "test", r.GetLabels()["custom1"])
 		}
-		return r.GetLabels()["custom1"] == "test", nil
-	})
+	}, 5*time.Second, 100*time.Millisecond)
 
 	// lose and re-acquire leadership
 	le.deactivate()
@@ -247,13 +245,12 @@ func TestManager(t *testing.T) {
 	writeLabel(t, filepath.Join(dir, "stack1/pod.yaml"), "custom2", "test")
 
 	t.Log("waiting for pod to be updated")
-	waitFor(t, 100*time.Millisecond, 5*time.Second, func(ctx context.Context) (bool, error) {
+	require.EventuallyWithT(t, func(t *assert.CollectT) {
 		r, err := getResource(fakes, *podgv, "kube-system", "applier-test")
-		if err != nil {
-			return false, nil
+		if assert.NoError(t, err) {
+			assert.Equal(t, "test", r.GetLabels()["custom2"])
 		}
-		return r.GetLabels()["custom2"] == "test", nil
-	})
+	}, 5*time.Second, 100*time.Millisecond)
 
 	// delete the stack and verify the resources are deleted
 
@@ -260,14 +257,11 @@ func TestManager(t *testing.T) {
 	err = os.RemoveAll(filepath.Join(dir, "stack1"))
 	require.NoError(t, err)
 
 	t.Log("waiting for pod to be deleted")
-	waitFor(t, 100*time.Millisecond, 5*time.Second, func(ctx context.Context) (bool, error) {
+	require.EventuallyWithT(t, func(t *assert.CollectT) {
 		_, err := getResource(fakes, *podgv, "kube-system", "applier-test")
-		if errors.IsNotFound(err) {
-			return true, nil
-		}
-		return false, nil
-	})
+		assert.Truef(t, errors.IsNotFound(err), "Expected a 'not found' error: %v", err)
+	}, 5*time.Second, 100*time.Millisecond)
 }
 
 func writeLabel(t *testing.T, file string, key string, value string) {
@@ -286,27 +280,19 @@ func writeLabel(t *testing.T, file string, key string, value string) {
 
 func waitForResource(t *testing.T, fakes *kubeutil.FakeClientFactory, gv schema.GroupVersionResource, namespace string, name string) {
 	t.Logf("waiting for resource %s/%s", gv.Resource, name)
-	waitFor(t, 100*time.Millisecond, 5*time.Second, func(ctx context.Context) (bool, error) {
+	require.EventuallyWithT(t, func(t *assert.CollectT) {
 		_, err := getResource(fakes, gv, namespace, name)
-		if errors.IsNotFound(err) {
-			return false, nil
-		} else if err != nil {
-			return false, err
+		if err != nil {
+			assert.Truef(t, errors.IsNotFound(err), "Expected a 'not found' error: %v", err)
+			assert.NoError(t, err)
 		}
-		return true, nil
-	})
+	}, 5*time.Second, 100*time.Millisecond)
 }
 
 func getResource(fakes *kubeutil.FakeClientFactory, gv schema.GroupVersionResource, namespace string, name string) (*unstructured.Unstructured, error) {
 	return fakes.DynamicClient.Resource(gv).Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{})
 }
 
-func waitFor(t *testing.T, interval, timeout time.Duration, fn wait.ConditionWithContextFunc) {
-	t.Helper()
-	err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, fn)
-	require.NoError(t, err)
-}
-
 func writeStack(t *testing.T, dst string, src string) {
 	dstStackDir := filepath.Join(dst, path.Base(src))
 	err := os.MkdirAll(dstStackDir, 0755)
diff --git a/pkg/autopilot/controller/plans/init.go b/pkg/autopilot/controller/plans/init.go
index 4f02ba58a458..9e6a1c0c9327 100644
--- a/pkg/autopilot/controller/plans/init.go
+++ b/pkg/autopilot/controller/plans/init.go
@@ -73,7 +73,7 @@ func registerNewPlanStateController(logger *logrus.Entry, mgr crman.Manager, pro
 		providers...,
 	)
 
-	return registerPlanStateController("newplan", logger, mgr, newPlanEventFilter(), handler, providers)
+	return registerPlanStateController("newplan", logger, mgr, 
newPlanEventFilter(), handler) } // registerSchedulableWaitStateController registers the 'schedulablewait' plan state controller to @@ -87,7 +87,7 @@ func registerSchedulableWaitStateController(logger *logrus.Entry, mgr crman.Mana providers..., ) - return registerPlanStateController("schedulablewait", logger, mgr, schedulableWaitEventFilter(), handler, providers) + return registerPlanStateController("schedulablewait", logger, mgr, schedulableWaitEventFilter(), handler) } // registerSchedulableStateController registers the 'schedulable' plan state controller to @@ -101,12 +101,12 @@ func registerSchedulableStateController(logger *logrus.Entry, mgr crman.Manager, providers..., ) - return registerPlanStateController("schedulable", logger, mgr, schedulableEventFilter(), handler, providers) + return registerPlanStateController("schedulable", logger, mgr, schedulableEventFilter(), handler) } // registerPlanStateController is a helper for registering a plan state controller into // controller-runtime. -func registerPlanStateController(name string, logger *logrus.Entry, mgr crman.Manager, eventFilter crpred.Predicate, handler appc.PlanStateHandler, providers []appc.PlanCommandProvider) error { +func registerPlanStateController(name string, logger *logrus.Entry, mgr crman.Manager, eventFilter crpred.Predicate, handler appc.PlanStateHandler) error { return cr.NewControllerManagedBy(mgr). Named("planstate-" + name). For(&apv1beta2.Plan{}). diff --git a/pkg/autopilot/controller/updates/periodicupdater.go b/pkg/autopilot/controller/updates/periodicupdater.go index 1aab0b668b81..fae29196b6ac 100644 --- a/pkg/autopilot/controller/updates/periodicupdater.go +++ b/pkg/autopilot/controller/updates/periodicupdater.go @@ -45,8 +45,7 @@ type periodicUpdater struct { ticker *time.Ticker } -func newPeriodicUpdater(ctx context.Context, updateConfig apv1beta2.UpdateConfig, k8sClient crcli.Client, apClientFactory apcli.FactoryInterface, clusterID, currentK0sVersion string) (*periodicUpdater, error) { - +func newPeriodicUpdater(ctx context.Context, updateConfig apv1beta2.UpdateConfig, k8sClient crcli.Client, apClientFactory apcli.FactoryInterface, clusterID, currentK0sVersion string) *periodicUpdater { return &periodicUpdater{ ctx: ctx, log: logrus.WithField("component", "periodic-updater"), @@ -55,7 +54,7 @@ func newPeriodicUpdater(ctx context.Context, updateConfig apv1beta2.UpdateConfig clusterID: clusterID, currentK0sVersion: currentK0sVersion, apClientFactory: apClientFactory, - }, nil + } } func (u *periodicUpdater) Config() *apv1beta2.UpdateConfig { diff --git a/pkg/autopilot/controller/updates/updater.go b/pkg/autopilot/controller/updates/updater.go index 4b43980a1a12..89fadaa3c725 100644 --- a/pkg/autopilot/controller/updates/updater.go +++ b/pkg/autopilot/controller/updates/updater.go @@ -75,7 +75,7 @@ func newUpdater(parentCtx context.Context, updateConfig apv1beta2.UpdateConfig, case apv1beta2.UpdateStrategyTypeCron: return newCronUpdater(parentCtx, updateConfig, k8sClient, clusterID, updateClient) case apv1beta2.UpdateStrategyTypePeriodic: - return newPeriodicUpdater(parentCtx, updateConfig, k8sClient, apClientFactory, clusterID, build.Version) + return newPeriodicUpdater(parentCtx, updateConfig, k8sClient, apClientFactory, clusterID, build.Version), nil default: return nil, fmt.Errorf("unknown update strategy type: %s", updateConfig.Spec.UpgradeStrategy.Type) } diff --git a/pkg/certificate/manager.go b/pkg/certificate/manager.go index 8795503087ce..d86a8e273343 100644 --- a/pkg/certificate/manager.go 
+++ b/pkg/certificate/manager.go
@@ -105,7 +105,7 @@ func (m *Manager) EnsureCertificate(certReq Request, ownerID int) (Certificate,
 	certFile := filepath.Join(m.K0sVars.CertRootDir, fmt.Sprintf("%s.crt", certReq.Name))
 
 	// if regenerateCert returns true, it means we need to create the certs
-	if m.regenerateCert(certReq, keyFile, certFile) {
+	if m.regenerateCert(keyFile, certFile) {
 		logrus.Debugf("creating certificate %s", certFile)
 		req := csr.CertificateRequest{
 			KeyRequest: csr.NewKeyRequest(),
@@ -191,7 +191,7 @@ func (m *Manager) EnsureCertificate(certReq Request, ownerID int) (Certificate,
 
 // if regenerateCert does not need to do any changes, it will return false
 // if a change in SAN hosts is detected, it will return true, to re-generate certs
-func (m *Manager) regenerateCert(certReq Request, keyFile string, certFile string) bool {
+func (m *Manager) regenerateCert(keyFile string, certFile string) bool {
 	var cert *certinfo.Certificate
 	var err error
diff --git a/pkg/component/controller/workerconfig/reconciler_test.go b/pkg/component/controller/workerconfig/reconciler_test.go
index e6018deae1bb..0947ce6658bb 100644
--- a/pkg/component/controller/workerconfig/reconciler_test.go
+++ b/pkg/component/controller/workerconfig/reconciler_test.go
@@ -373,35 +373,35 @@ func TestReconciler_ResourceGeneration(t *testing.T) {
 		},
 	}))
 
-	configMaps := map[string]func(t *testing.T, expected *kubeletConfig){
-		"worker-config-default-1.31": func(t *testing.T, expected *kubeletConfig) {
+	expectedConfigMaps := map[string]func(expected *kubeletConfig){
+		"worker-config-default-1.31": func(expected *kubeletConfig) {
 			expected.CgroupsPerQOS = ptr.To(true)
 			expected.FeatureGates = map[string]bool{"kubelet-feature": true}
 		},
-		"worker-config-default-windows-1.31": func(t *testing.T, expected *kubeletConfig) {
+		"worker-config-default-windows-1.31": func(expected *kubeletConfig) {
 			expected.CgroupsPerQOS = ptr.To(false)
 			expected.FeatureGates = map[string]bool{"kubelet-feature": true}
 		},
-		"worker-config-profile_XXX-1.31": func(t *testing.T, expected *kubeletConfig) {
+		"worker-config-profile_XXX-1.31": func(expected *kubeletConfig) {
 			expected.Authentication.Anonymous.Enabled = ptr.To(true)
 			expected.FeatureGates = map[string]bool{"kubelet-feature": true}
 		},
-		"worker-config-profile_YYY-1.31": func(t *testing.T, expected *kubeletConfig) {
+		"worker-config-profile_YYY-1.31": func(expected *kubeletConfig) {
 			expected.Authentication.Webhook.CacheTTL = metav1.Duration{Duration: 15 * time.Second}
 			expected.FeatureGates = map[string]bool{"kubelet-feature": true}
 		},
 	}
 
 	appliedResources := applied()
-	assert.Len(t, appliedResources, len(configMaps)+2)
+	assert.Len(t, appliedResources, len(expectedConfigMaps)+2)
 
-	for name, mod := range configMaps {
+	for name, configModFn := range expectedConfigMaps {
 		t.Run(name, func(t *testing.T) {
 			kubelet := requireKubelet(t, appliedResources, name)
-			expected := makeKubeletConfig(t, func(expected *kubeletConfig) { mod(t, expected) })
+			expected := makeKubeletConfig(t, configModFn)
 			assert.JSONEq(t, expected, kubelet)
 		})
 	}
@@ -427,9 +427,9 @@ func TestReconciler_ResourceGeneration(t *testing.T) {
 		require.NoError(t, err)
 		require.True(t, ok, "No resourceNames field")
 
-		assert.Len(t, resourceNames, len(configMaps))
-		for expected := range configMaps {
-			assert.Contains(t, resourceNames, expected)
+		assert.Len(t, resourceNames, len(expectedConfigMaps))
+		for name := range expectedConfigMaps {
+			assert.Contains(t, resourceNames, name)
 		}
 	})
 
@@ -577,10 +577,10 @@ func 
TestReconciler_runReconcileLoop(t *testing.T) {
 	updates, firstDone, secondDone := make(chan updateFunc, 2), make(chan error, 1), make(chan error, 1)
 
 	// Put in the first update.
-	updates <- func(s *snapshot) chan<- error { return firstDone }
+	updates <- func(*snapshot) chan<- error { return firstDone }
 
 	// Put in the second update that'll cancel the context.
-	updates <- func(s *snapshot) chan<- error { cancel(); return secondDone }
+	updates <- func(*snapshot) chan<- error { cancel(); return secondDone }
 
 	underTest.runReconcileLoop(ctx, updates, nil)
diff --git a/pkg/component/prober/prober.go b/pkg/component/prober/prober.go
index 6ae1eeddeae7..8aff43b6e33c 100644
--- a/pkg/component/prober/prober.go
+++ b/pkg/component/prober/prober.go
@@ -145,7 +145,7 @@ func (p *Prober) healthCheckLoop(ctx context.Context) {
 			return
 		case at := <-ticker.C:
 			p.l.Debug("Probing components")
-			p.checkComponentsHealth(ctx, at)
+			p.checkComponentsHealth(at)
 			// limit amount of iterations for the test purposes
 			if p.stopAfterIterationNum > 0 {
 				epoch++
@@ -156,7 +156,7 @@ func (p *Prober) healthCheckLoop(ctx context.Context) {
 		}
 	}
 }
-func (p *Prober) checkComponentsHealth(ctx context.Context, at time.Time) {
+func (p *Prober) checkComponentsHealth(at time.Time) {
 	for name, component := range p.withHealthComponents {
 		p.Lock()
 		if _, ok := p.healthCheckState[name]; !ok {
diff --git a/pkg/kubernetes/watch/watcher_test.go b/pkg/kubernetes/watch/watcher_test.go
index 25326c462dfc..69e2ce3129e9 100644
--- a/pkg/kubernetes/watch/watcher_test.go
+++ b/pkg/kubernetes/watch/watcher_test.go
@@ -496,7 +496,7 @@ func TestWatcher(t *testing.T) {
 			return nil
 		}
 
-		second = func(opts metav1.ListOptions) error {
+		second = func(opts metav1.ListOptions) error { //nolint:unparam // error required to match the function signature
 			assert.Equal(t, "the bookmark", opts.ResourceVersion)
 			assert.True(t, opts.AllowWatchBookmarks)
 
@@ -508,7 +508,7 @@ func TestWatcher(t *testing.T) {
 			return nil
 		}
 
-		third = func(opts metav1.ListOptions) error {
+		third = func(opts metav1.ListOptions) error { //nolint:unparam // error required to match the function signature
 			assert.Equal(t, "someConfigMap", opts.ResourceVersion)
 			assert.True(t, opts.AllowWatchBookmarks