diff --git a/cmd/maintenance-manager/main.go b/cmd/maintenance-manager/main.go index 2e37402..e65d5ba 100644 --- a/cmd/maintenance-manager/main.go +++ b/cmd/maintenance-manager/main.go @@ -148,11 +148,9 @@ func main() { ctx := ctrl.SetupSignalHandler() mgrClient := mgr.GetClient() - nmrOptions := controller.NewNodeMaintenanceReconcilerOptions() if err = (&controller.NodeMaintenanceReconciler{ Client: mgrClient, Scheme: mgr.GetScheme(), - Options: nmrOptions, CordonHandler: cordon.NewCordonHandler(mgrClient, k8sInterface), WaitPodCompletionHandler: podcompletion.NewPodCompletionHandler(mgrClient), DrainManager: drain.NewManager(ctrl.Log.WithName("DrainManager"), ctx, k8sInterface), @@ -161,6 +159,14 @@ func main() { os.Exit(1) } + gcOptions := controller.NewGarbageCollectorOptions() + gcLog := ctrl.Log.WithName("NodeMaintenanceGarbageCollector") + if err = controller.NewNodeMaintenanceGarbageCollector( + mgrClient, gcOptions, gcLog).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "NodeMaintenanceGarbageCollector") + os.Exit(1) + } + nmsrOptions := controller.NewNodeMaintenanceSchedulerReconcilerOptions() nmsrLog := ctrl.Log.WithName("NodeMaintenanceScheduler") if err = (&controller.NodeMaintenanceSchedulerReconciler{ @@ -175,10 +181,10 @@ func main() { } if err = (&controller.MaintenanceOperatorConfigReconciler{ - Client: mgrClient, - Scheme: mgr.GetScheme(), - NodeMaintenanceReconcierOptions: nmrOptions, - SchedulerReconcierOptions: nmsrOptions, + Client: mgrClient, + Scheme: mgr.GetScheme(), + GarbageCollectorOptions: gcOptions, + SchedulerReconcierOptions: nmsrOptions, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "MaintenanceOperatorConfig") os.Exit(1) diff --git a/internal/controller/garbage_collector_controller.go b/internal/controller/garbage_collector_controller.go new file mode 100644 index 0000000..82f30b0 --- /dev/null +++ 
b/internal/controller/garbage_collector_controller.go @@ -0,0 +1,169 @@ +/* + Copyright 2024, NVIDIA CORPORATION & AFFILIATES + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package controller + +import ( + "context" + "sync" + "time" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + maintenancev1 "github.com/Mellanox/maintenance-operator/api/v1alpha1" + "github.com/Mellanox/maintenance-operator/internal/log" +) + +var ( + defaultMaxNodeMaintenanceTime = 1600 * time.Second + defaultGarbageCollectionReconcileTime = 5 * time.Minute + garbageCollectionReconcileTime = defaultGarbageCollectionReconcileTime +) + +// GarbageCollectIgnoreAnnotation garbage collector will skip NodeMaintenance with this annotation. +const GarbageCollectIgnoreAnnotation = "maintenance.nvidia.com/garbage-collector.ignore" + +// NewGarbageCollectorOptions creates new *GarbageCollectorOptions +func NewGarbageCollectorOptions() *GarbageCollectorOptions { + return &GarbageCollectorOptions{ + pendingMaxNodeMaintenanceTime: defaultMaxNodeMaintenanceTime, + maxNodeMaintenanceTime: defaultMaxNodeMaintenanceTime, + } +} + +// GarbageCollectorOptions are options for GarbageCollector where values +// are stored by external entity and read by GarbageCollector. 
+type GarbageCollectorOptions struct { + sync.Mutex + + pendingMaxNodeMaintenanceTime time.Duration + maxNodeMaintenanceTime time.Duration +} + +// Store maxNodeMaintenanceTime +func (gco *GarbageCollectorOptions) Store(maxNodeMaintenanceTime time.Duration) { + gco.Lock() + defer gco.Unlock() + + gco.pendingMaxNodeMaintenanceTime = maxNodeMaintenanceTime +} + +// Load loads the last Stored options +func (gco *GarbageCollectorOptions) Load() { + gco.Lock() + defer gco.Unlock() + + gco.maxNodeMaintenanceTime = gco.pendingMaxNodeMaintenanceTime +} + +// MaxNodeMaintenanceTime returns the last loaded MaxNodeMaintenanceTime option +func (gco *GarbageCollectorOptions) MaxNodeMaintenanceTime() time.Duration { + return gco.maxNodeMaintenanceTime +} + +// NewNodeMaintenanceGarbageCollector creates a new NodeMaintenanceGarbageCollector +func NewNodeMaintenanceGarbageCollector(kClient client.Client, options *GarbageCollectorOptions, log logr.Logger) *NodeMaintenanceGarbageCollector { + return &NodeMaintenanceGarbageCollector{ + Client: kClient, + options: options, + log: log, + } +} + +// NodeMaintenanceGarbageCollector performs garbage collection for NodeMaintenance +type NodeMaintenanceGarbageCollector struct { + client.Client + + options *GarbageCollectorOptions + log logr.Logger +} + +// SetupWithManager sets up NodeMaintenanceGarbageCollector with controller manager +func (r *NodeMaintenanceGarbageCollector) SetupWithManager(mgr ctrl.Manager) error { + return mgr.Add(r) +} + +// Reconcile collects garbage once +func (r *NodeMaintenanceGarbageCollector) Reconcile(ctx context.Context) error { + r.log.Info("periodic reconcile start") + r.options.Load() + r.log.V(log.DebugLevel).Info("loaded options", "maxNodeMaintenanceTime", r.options.MaxNodeMaintenanceTime()) + + mnl := &maintenancev1.NodeMaintenanceList{} + err := r.List(ctx, mnl) + if err != nil { + return errors.Wrap(err, "failed to list NodeMaintenance") + } + + timeNow := time.Now() + for _, nm := range mnl.Items { + // 
skip NodeMaintenance with ignore annotation + nmLog := r.log.WithValues("namespace", nm.Namespace, "name", nm.Name) + + if nm.Annotations[GarbageCollectIgnoreAnnotation] == "true" { + nmLog.Info("skipping NodeMaintenance due to ignore annotation") + continue + } + + if nm.Annotations[ReadyTimeAnnotation] != "" { + readyTime, err := time.Parse(time.RFC3339, nm.Annotations[ReadyTimeAnnotation]) + if err != nil { + nmLog.Error(err, "failed to parse ready-time annotation for NodeMaintenance") + continue + } + if timeNow.After(readyTime.Add(r.options.MaxNodeMaintenanceTime())) { + nmLog.Info("NodeMaintenance is due for garbage collection") + if nm.GetDeletionTimestamp().IsZero() { + nmLog.Info("deleting NodeMaintenance") + if err = r.Delete(ctx, &nm); err != nil { + nmLog.Error(err, "failed to delete NodeMaintenance") + } + } else { + r.log.V(log.DebugLevel).Info("deletion timestamp already set for NodeMaintenance") + } + } + } + } + + r.log.Info("periodic reconcile end") + return nil +} + +// Start NodeMaintenanceGarbageCollector +func (r *NodeMaintenanceGarbageCollector) Start(ctx context.Context) error { + r.log.Info("NodeMaintenanceGarbageCollector Start") + + t := time.NewTicker(garbageCollectionReconcileTime) + defer t.Stop() + +OUTER: + for { + select { + case <-ctx.Done(): + break OUTER + case <-t.C: + err := r.Reconcile(ctx) + if err != nil { + r.log.Error(err, "failed to run reconcile") + } + } + } + + return nil +} diff --git a/internal/controller/garbage_collector_controller_test.go b/internal/controller/garbage_collector_controller_test.go new file mode 100644 index 0000000..476717b --- /dev/null +++ b/internal/controller/garbage_collector_controller_test.go @@ -0,0 +1,180 @@ +/* + Copyright 2024, NVIDIA CORPORATION & AFFILIATES + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package controller + +import ( + "context" + "sync" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + maintenancev1 "github.com/Mellanox/maintenance-operator/api/v1alpha1" + "github.com/Mellanox/maintenance-operator/internal/testutils" +) + +var _ = Describe("NodeMaintenance Controller", func() { + Context("Envtests", func() { + var nmObjectsToCleanup []*maintenancev1.NodeMaintenance + var reconciler *NodeMaintenanceGarbageCollector + var options *GarbageCollectorOptions + // test context, TODO(adrianc): use ginkgo spec context + var testCtx context.Context + + BeforeEach(func() { + testCtx = context.Background() + garbageCollectionReconcileTime = 100 * time.Millisecond + DeferCleanup(func() { + garbageCollectionReconcileTime = defaultGarbageCollectionReconcileTime + }) + + // create controller manager + By("create controller manager") + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: k8sClient.Scheme(), + Metrics: metricsserver.Options{BindAddress: "0"}, + }) + Expect(err).ToNot(HaveOccurred()) + + // create reconciler + By("create NodeMaintenanceGarbageCollector") + options = NewGarbageCollectorOptions() + options.Store(1 * time.Second) + reconciler = NewNodeMaintenanceGarbageCollector( + 
k8sClient, options, ctrllog.Log.WithName("NodeMaintenanceGarbageCollector")) + + // setup reconciler with manager + By("setup NodeMaintenanceGarbageCollector with controller manager") + Expect(reconciler.SetupWithManager(mgr)). + ToNot(HaveOccurred()) + + // start manager + testMgrCtx, cancel := context.WithCancel(testCtx) + By("start manager") + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + defer GinkgoRecover() + By("Start controller manager") + err := mgr.Start(testMgrCtx) + Expect(err).ToNot(HaveOccurred()) + }() + + DeferCleanup(func() { + By("Shut down controller manager") + cancel() + wg.Wait() + }) + }) + + AfterEach(func() { + By("Cleanup NodeMaintenance resources") + for _, nm := range nmObjectsToCleanup { + err := k8sClient.Delete(testCtx, nm) + if err != nil && k8serrors.IsNotFound(err) { + err = nil + } + Expect(err).ToNot(HaveOccurred()) + } + By("Wait for NodeMaintenance resources to be deleted") + for _, nm := range nmObjectsToCleanup { + Eventually(func() bool { + err := k8sClient.Get(testCtx, types.NamespacedName{Namespace: nm.Namespace, Name: nm.Name}, nm) + if err != nil && k8serrors.IsNotFound(err) { + return true + } + return false + + }).WithTimeout(10 * time.Second).WithPolling(1 * time.Second).Should(BeTrue()) + } + nmObjectsToCleanup = make([]*maintenancev1.NodeMaintenance, 0) + }) + + It("Should Delete NodeMaintenance with ready time annotation", func() { + nm := testutils.GetTestNodeMaintenance("test-nm", "test-node-0", "some-operator.nvidia.com", "") + metav1.SetMetaDataAnnotation(&nm.ObjectMeta, ReadyTimeAnnotation, time.Now().UTC().Format(time.RFC3339)) + Expect(k8sClient.Create(testCtx, nm)).ToNot(HaveOccurred()) + nmObjectsToCleanup = append(nmObjectsToCleanup, nm) + + By("Consistently NodeMaintenance exists") + Consistently(k8sClient.Get(testCtx, client.ObjectKeyFromObject(nm), nm)). + Within(500 * time.Millisecond). + WithPolling(100 * time.Millisecond). 
+ Should(Succeed()) + + By("Eventually NodeMaintenance is deleted") + Eventually(func() bool { + err := k8sClient.Get(testCtx, client.ObjectKeyFromObject(nm), nm) + if err != nil && k8serrors.IsNotFound(err) { + return true + } + return false + }). + WithTimeout(1 * time.Second). + Should(BeTrue()) + }) + + It("should not delete NodeMaintenance with ready time annotation if ignore garbage collection annotation set", func() { + nm := testutils.GetTestNodeMaintenance("test-nm", "test-node-0", "some-operator.nvidia.com", "") + metav1.SetMetaDataAnnotation(&nm.ObjectMeta, ReadyTimeAnnotation, time.Now().UTC().Format(time.RFC3339)) + metav1.SetMetaDataAnnotation(&nm.ObjectMeta, GarbageCollectIgnoreAnnotation, "true") + Expect(k8sClient.Create(testCtx, nm)).ToNot(HaveOccurred()) + nmObjectsToCleanup = append(nmObjectsToCleanup, nm) + + By("Consistently NodeMaintenance exists") + Consistently(k8sClient.Get(testCtx, client.ObjectKeyFromObject(nm), nm)). + Within(2 * time.Second). + WithPolling(500 * time.Millisecond). + Should(Succeed()) + }) + + It("should not delete NodeMaintenance without ready time annotation", func() { + nm := testutils.GetTestNodeMaintenance("test-nm", "test-node-0", "some-operator.nvidia.com", "") + Expect(k8sClient.Create(testCtx, nm)).ToNot(HaveOccurred()) + nmObjectsToCleanup = append(nmObjectsToCleanup, nm) + + By("Consistently NodeMaintenance exists") + Consistently(k8sClient.Get(testCtx, client.ObjectKeyFromObject(nm), nm)). + Within(2 * time.Second). + WithPolling(500 * time.Millisecond). 
+ Should(Succeed()) + }) + }) + + Context("UnitTests", func() { + Context("GarbageCollectorOptions", func() { + It("Works", func() { + options := NewGarbageCollectorOptions() + Expect(options.MaxNodeMaintenanceTime()).To(Equal(defaultMaxNodeMaintenanceTime)) + newTime := 300 * time.Second + options.Store(newTime) + Expect(options.MaxNodeMaintenanceTime()).To(Equal(defaultMaxNodeMaintenanceTime)) + options.Load() + Expect(options.MaxNodeMaintenanceTime()).To(Equal(newTime)) + }) + }) + }) +}) diff --git a/internal/controller/maintenanceoperatorconfig_controller.go b/internal/controller/maintenanceoperatorconfig_controller.go index d9d0aca..ca0b59d 100644 --- a/internal/controller/maintenanceoperatorconfig_controller.go +++ b/internal/controller/maintenanceoperatorconfig_controller.go @@ -40,8 +40,8 @@ type MaintenanceOperatorConfigReconciler struct { client.Client Scheme *runtime.Scheme - SchedulerReconcierOptions *NodeMaintenanceSchedulerReconcilerOptions - NodeMaintenanceReconcierOptions *NodeMaintenanceReconcilerOptions + SchedulerReconcierOptions *NodeMaintenanceSchedulerReconcilerOptions + GarbageCollectorOptions *GarbageCollectorOptions } //+kubebuilder:rbac:groups=maintenance.nvidia.com,resources=maintenanceoperatorconfigs,verbs=get;list;watch;create;update;patch;delete @@ -75,7 +75,7 @@ func (r *MaintenanceOperatorConfigReconciler) Reconcile(ctx context.Context, req "MaxParallelOperations", cfg.Spec.MaxParallelOperations) r.SchedulerReconcierOptions.Store(cfg.Spec.MaxUnavailable, cfg.Spec.MaxParallelOperations) reqLog.Info("store nodeMaintenance reconciler options", "MaxNodeMaintenanceTimeSeconds", cfg.Spec.MaxNodeMaintenanceTimeSeconds) - r.NodeMaintenanceReconcierOptions.Store(time.Second * time.Duration(cfg.Spec.MaxNodeMaintenanceTimeSeconds)) + r.GarbageCollectorOptions.Store(time.Second * time.Duration(cfg.Spec.MaxNodeMaintenanceTimeSeconds)) // handle log level reqLog.Info("setting operator log level", "LogLevel", cfg.Spec.LogLevel) diff --git 
a/internal/controller/maintenanceoperatorconfig_controller_test.go b/internal/controller/maintenanceoperatorconfig_controller_test.go index 59f527e..0fc2f49 100644 --- a/internal/controller/maintenanceoperatorconfig_controller_test.go +++ b/internal/controller/maintenanceoperatorconfig_controller_test.go @@ -52,10 +52,10 @@ var _ = Describe("MaintenanceOperatorConfig Controller", func() { // create reconciler By("create MaintenanceOperatorConfigReconciler") reconciler = &MaintenanceOperatorConfigReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - SchedulerReconcierOptions: NewNodeMaintenanceSchedulerReconcilerOptions(), - NodeMaintenanceReconcierOptions: NewNodeMaintenanceReconcilerOptions(), + Client: k8sClient, + Scheme: k8sClient.Scheme(), + SchedulerReconcierOptions: NewNodeMaintenanceSchedulerReconcilerOptions(), + GarbageCollectorOptions: NewGarbageCollectorOptions(), } // setup reconciler with manager @@ -94,10 +94,10 @@ var _ = Describe("MaintenanceOperatorConfig Controller", func() { Consistently(func(g Gomega) { reconciler.SchedulerReconcierOptions.Load() - reconciler.NodeMaintenanceReconcierOptions.Load() + reconciler.GarbageCollectorOptions.Load() g.Expect(reconciler.SchedulerReconcierOptions.MaxParallelOperations()).To(Equal(&intstr.IntOrString{Type: intstr.Int, IntVal: 1})) g.Expect(reconciler.SchedulerReconcierOptions.MaxUnavailable()).To(BeNil()) - g.Expect(reconciler.NodeMaintenanceReconcierOptions.MaxNodeMaintenanceTime()).To(Equal(defaultMaxNodeMaintenanceTime)) + g.Expect(reconciler.GarbageCollectorOptions.MaxNodeMaintenanceTime()).To(Equal(defaultMaxNodeMaintenanceTime)) }).ProbeEvery(100 * time.Millisecond).Within(time.Second).Should(Succeed()) }) @@ -120,10 +120,10 @@ var _ = Describe("MaintenanceOperatorConfig Controller", func() { By("check MaintenanceOperatorConfig values were updated") Eventually(func(g Gomega) { reconciler.SchedulerReconcierOptions.Load() - reconciler.NodeMaintenanceReconcierOptions.Load() + 
reconciler.GarbageCollectorOptions.Load() g.Expect(reconciler.SchedulerReconcierOptions.MaxParallelOperations()).To(Equal(oc.Spec.MaxParallelOperations)) g.Expect(reconciler.SchedulerReconcierOptions.MaxUnavailable()).To(Equal(oc.Spec.MaxUnavailable)) - g.Expect(reconciler.NodeMaintenanceReconcierOptions.MaxNodeMaintenanceTime()). + g.Expect(reconciler.GarbageCollectorOptions.MaxNodeMaintenanceTime()). To(Equal(time.Second * time.Duration(oc.Spec.MaxNodeMaintenanceTimeSeconds))) g.Expect(operatorlog.GetLogLevel()).To(BeEquivalentTo(oc.Spec.LogLevel)) }).ProbeEvery(100 * time.Millisecond).Within(time.Second).Should(Succeed()) @@ -137,10 +137,10 @@ var _ = Describe("MaintenanceOperatorConfig Controller", func() { By("check MaintenanceOperatorConfig values were updated") Eventually(func(g Gomega) { reconciler.SchedulerReconcierOptions.Load() - reconciler.NodeMaintenanceReconcierOptions.Load() + reconciler.GarbageCollectorOptions.Load() g.Expect(reconciler.SchedulerReconcierOptions.MaxParallelOperations()).To(Equal(oc.Spec.MaxParallelOperations)) g.Expect(reconciler.SchedulerReconcierOptions.MaxUnavailable()).To(Equal(oc.Spec.MaxUnavailable)) - g.Expect(reconciler.NodeMaintenanceReconcierOptions.MaxNodeMaintenanceTime()). + g.Expect(reconciler.GarbageCollectorOptions.MaxNodeMaintenanceTime()). 
To(Equal(time.Second * time.Duration(oc.Spec.MaxNodeMaintenanceTimeSeconds))) g.Expect(operatorlog.GetLogLevel()).To(BeEquivalentTo(oc.Spec.LogLevel)) }).ProbeEvery(100 * time.Millisecond).Within(time.Second).Should(Succeed()) diff --git a/internal/controller/nodemaintenance_controller.go b/internal/controller/nodemaintenance_controller.go index c55da0a..cd98134 100644 --- a/internal/controller/nodemaintenance_controller.go +++ b/internal/controller/nodemaintenance_controller.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "reflect" - "sync" "time" "github.com/go-logr/logr" @@ -50,7 +49,6 @@ import ( ) var ( - defaultMaxNodeMaintenanceTime = 1600 * time.Second waitPodCompletionRequeueTime = 10 * time.Second drainReqeueTime = 10 * time.Second additionalRequestorsRequeueTime = 10 * time.Second @@ -61,51 +59,12 @@ const ( ReadyTimeAnnotation = "maintenance.nvidia.com/ready-time" ) -// NewNodeMaintenanceReconcilerOptions creates new *NodeMaintenanceReconcilerOptions -func NewNodeMaintenanceReconcilerOptions() *NodeMaintenanceReconcilerOptions { - return &NodeMaintenanceReconcilerOptions{ - pendingMaxNodeMaintenanceTime: defaultMaxNodeMaintenanceTime, - maxNodeMaintenanceTime: defaultMaxNodeMaintenanceTime, - } -} - -// NodeMaintenanceReconcilerOptions are options for NodeMaintenanceReconciler where values -// are stored by external entity and read by NodeMaintenanceReconciler. 
-type NodeMaintenanceReconcilerOptions struct { - sync.Mutex - - pendingMaxNodeMaintenanceTime time.Duration - maxNodeMaintenanceTime time.Duration -} - -// Store maxUnavailable, maxParallelOperations options for NodeMaintenanceReconciler -func (nmro *NodeMaintenanceReconcilerOptions) Store(maxNodeMaintenanceTime time.Duration) { - nmro.Lock() - defer nmro.Unlock() - - nmro.pendingMaxNodeMaintenanceTime = maxNodeMaintenanceTime -} - -// Load loads the last Stored options -func (nmro *NodeMaintenanceReconcilerOptions) Load() { - nmro.Lock() - defer nmro.Unlock() - - nmro.maxNodeMaintenanceTime = nmro.pendingMaxNodeMaintenanceTime -} - -// MaxNodeMaintenanceTime returns the last loaded MaxUnavailable option -func (nmro *NodeMaintenanceReconcilerOptions) MaxNodeMaintenanceTime() time.Duration { - return nmro.maxNodeMaintenanceTime -} - // NodeMaintenanceReconciler reconciles a NodeMaintenance object type NodeMaintenanceReconciler struct { client.Client Scheme *runtime.Scheme EventRecorder record.EventRecorder - Options *NodeMaintenanceReconcilerOptions CordonHandler cordon.Handler WaitPodCompletionHandler podcompletion.Handler DrainManager drain.Manager @@ -127,12 +86,8 @@ type NodeMaintenanceReconciler struct { func (r *NodeMaintenanceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqLog := log.FromContext(ctx) reqLog.Info("got request", "name", req.NamespacedName) - var err error - - // load any stored options - r.Options.Load() - reqLog.Info("loaded options", "maxNodeMaintenanceTime", r.Options.MaxNodeMaintenanceTime()) reqLog.Info("outstanding drain requests", "num", len(r.DrainManager.ListRequests())) + var err error // get NodeMaintenance object nm := &maintenancev1.NodeMaintenance{} diff --git a/internal/controller/nodemaintenance_controller_test.go b/internal/controller/nodemaintenance_controller_test.go index 059b266..50e1f3a 100644 --- a/internal/controller/nodemaintenance_controller_test.go +++ 
b/internal/controller/nodemaintenance_controller_test.go @@ -47,7 +47,6 @@ var _ = Describe("NodeMaintenance Controller", func() { var podObjectsToCleanup []*corev1.Pod var reconciler *NodeMaintenanceReconciler - var options *NodeMaintenanceReconcilerOptions // test context, TODO(adrianc): use ginkgo spec context var testCtx context.Context @@ -72,11 +71,9 @@ var _ = Describe("NodeMaintenance Controller", func() { // create reconciler By("create NodeMaintenanceReconciler") - options = NewNodeMaintenanceReconcilerOptions() reconciler = &NodeMaintenanceReconciler{ Client: k8sClient, Scheme: k8sClient.Scheme(), - Options: options, CordonHandler: cordon.NewCordonHandler(k8sClient, k8sInterface), WaitPodCompletionHandler: podcompletion.NewPodCompletionHandler(k8sClient), DrainManager: drain.NewManager(ctrllog.Log.WithName("DrainManager"), @@ -280,18 +277,4 @@ var _ = Describe("NodeMaintenance Controller", func() { Expect(k8serrors.IsNotFound(err)).To(BeTrue()) }) }) - - Context("UnitTests", func() { - Context("NodeMaintenanceReconcilerOptions", func() { - It("Works", func() { - options := NewNodeMaintenanceReconcilerOptions() - Expect(options.MaxNodeMaintenanceTime()).To(Equal(defaultMaxNodeMaintenanceTime)) - newTime := 300 * time.Second - options.Store(newTime) - Expect(options.MaxNodeMaintenanceTime()).To(Equal(defaultMaxNodeMaintenanceTime)) - options.Load() - Expect(options.MaxNodeMaintenanceTime()).To(Equal(newTime)) - }) - }) - }) })