From 9fb5dccced01779b1904163eddf40007b4d01648 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan-Luis=20de=20Sousa-Valadas=20Casta=C3=B1o?=
Date: Mon, 20 May 2024 14:04:47 +0200
Subject: [PATCH] Allow CPLB to work with externalAddress
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Allow this behavior if the endpoint-reconciler is disabled.

Signed-off-by: Juan-Luis de Sousa-Valadas Castaño
---
 cmd/controller/controller.go                |  18 +-
 docs/cplb.md                                | 297 ++++++++++++++++++--
 inttest/cplb/cplb_test.go                   |   9 +-
 pkg/apis/k0s/v1beta1/clusterconfig_types.go |   2 +-
 pkg/apis/k0s/v1beta1/cplb.go                |  11 +-
 pkg/component/controller/cplb_unix.go       |  32 ++-
 pkg/component/controller/cplb_windows.go    |  13 +-
 7 files changed, 315 insertions(+), 67 deletions(-)

diff --git a/cmd/controller/controller.go b/cmd/controller/controller.go
index 9733d7e9fed8..1ef19ffec7b7 100644
--- a/cmd/controller/controller.go
+++ b/cmd/controller/controller.go
@@ -228,24 +228,28 @@ func (c *command) start(ctx context.Context) error {
 		nodeComponents.Add(ctx, controllerLeaseCounter)
 	}
 
+	// The endpoint-reconciler component is active when it has not been
+	// explicitly disabled and an external API address is configured.
+	disableEndpointReconciler := !slices.Contains(c.DisableComponents, constant.APIEndpointReconcilerComponentName) &&
+		nodeConfig.Spec.API.ExternalAddress != ""
+
 	if cplb := nodeConfig.Spec.Network.ControlPlaneLoadBalancing; cplb != nil && cplb.Enabled {
 		if c.SingleNode {
 			return errors.New("control plane load balancing cannot be used in a single-node cluster")
 		}
 
 		nodeComponents.Add(ctx, &controller.Keepalived{
-			K0sVars:         c.K0sVars,
-			Config:          cplb.Keepalived,
-			DetailedLogging: c.Debug,
-			LogConfig:       c.Debug,
-			KubeConfigPath:  c.K0sVars.AdminKubeConfigPath,
-			APIPort:         nodeConfig.Spec.API.Port,
+			K0sVars:               c.K0sVars,
+			Config:                cplb.Keepalived,
+			DetailedLogging:       c.Debug,
+			LogConfig:             c.Debug,
+			KubeConfigPath:        c.K0sVars.AdminKubeConfigPath,
+			APIPort:               nodeConfig.Spec.API.Port,
+			HasEndpointReconciler: disableEndpointReconciler,
 		})
 	}
 
 	enableKonnectivity := !c.SingleNode && !slices.Contains(c.DisableComponents, constant.KonnectivityServerComponentName)
-	disableEndpointReconciler := !slices.Contains(c.DisableComponents, constant.APIEndpointReconcilerComponentName) &&
-		nodeConfig.Spec.API.ExternalAddress != ""
 
 	if enableKonnectivity {
 		nodeComponents.Add(ctx, &controller.Konnectivity{
diff --git a/docs/cplb.md b/docs/cplb.md
index 4e907a2cfffe..2d03d147c033 100644
--- a/docs/cplb.md
+++ b/docs/cplb.md
@@ -7,10 +7,15 @@ CPLB has two features that are independent, but normally will be used together:
 automatic assignment of predefined IP addresses using VRRP across control plane nodes,
 and VirtualServers, which provide load balancing to the other control plane nodes.
 
-This feature is intended to be used for external traffic. This feature is fully compatible with
+By default, CPLB is not used for internal traffic. This feature is fully compatible with
 [node-local load balancing (NLLB)](nllb.md), which means CPLB can be used for external traffic
 and NLLB for internal traffic at the same time.
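By way of illustration — a minimal sketch, not taken from this patch, with assumed example values that mirror the NLLB example later in this document — the default split corresponds to a configuration like:

```yaml
# Sketch: CPLB answers external clients on the VIP while NLLB balances
# worker-to-apiserver traffic. All values are assumed examples.
spec:
  network:
    controlPlaneLoadBalancing:
      enabled: true
      type: Keepalived
      keepalived:
        vrrpInstances:
        - virtualIPs: ["192.168.122.200/24"]  # assumed example VIP
          authPass: Example                   # assumed example password
    nodeLocalLoadBalancing:
      enabled: true
      type: EnvoyProxy
```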
+Additionally, CPLB can be used for internal traffic as well if [`spec.api.externalAddress`][specapi] is
+configured and the [endpoint-reconciler is disabled](configuration.md#disabling-controller-components).
+
+[specapi]: configuration.md#specapi
+
 ## Technical functionality
 
 The k0s control plane load balancer provides k0s with virtual IPs and TCP
@@ -52,9 +57,9 @@ following:
   These do not provide any sort of security against ill-intentioned attacks; they are safety
   features to prevent accidental conflicts between VRRP instances in the same network segment.
-* If `VirtualServers` are used, the cluster configuration mustn't specify a non-empty
-  [`spec.api.externalAddress`][specapi]. If only `VRRPInstances` are specified, a
-  non-empty [`spec.api.externalAddress`][specapi] may be specified.
+* If both `VirtualServers` and a non-empty [`spec.api.externalAddress`][specapi] are specified,
+  then the [endpoint-reconciler component](configuration.md#disabling-controller-components)
+  must be disabled on every control plane node.
 
 Add the following to the cluster configuration (`k0s.yaml`):
 
@@ -69,7 +74,7 @@ spec:
         - virtualIPs: ["<VIP address>/<prefix length>"]
           authPass: <password>
         virtualServers:
-        - ipAddress: "ipAddress"
+        - ipAddress: "<VIP address>"
 ```
 
 Or alternatively, if using [`k0sctl`](k0sctl-install.md), add the following to
@@ -86,19 +91,17 @@ spec:
             type: Keepalived
             keepalived:
               vrrpInstances:
-              - virtualIPs: ["<VIP address>/<prefix length>"]
+              - virtualIPs: ["<VIP address>/<prefix length>"]
                 authPass: <password>
               virtualServers:
-              - ipAddress: "<VIP address>"
+              - ipAddress: "<VIP address>"
 ```
 
 Because this is a feature intended to configure the apiserver, CPLB does not
 support dynamic configuration; to apply changes, you need to restart the k0s
 controllers.
 
-[specapi]: configuration.md#specapi
-
-## Full example using `k0sctl`
+## Full example using `k0sctl` and externalAddress
 
 The following example shows a full `k0sctl` configuration file featuring three
 controllers and three workers with control plane load balancing enabled.
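Condensed from the full `k0sctl` file that follows, the effective controller configuration is sketched below; note that each controller must additionally be installed with `--disable-components=endpoint-reconciler`. The values are those of the example below; the combination, not the addresses, is the point:

```yaml
# Sketch: internal and external traffic both go through the VIP, so
# spec.api.externalAddress matches the virtual server address, and the
# endpoint-reconciler component is disabled via install flags.
spec:
  api:
    externalAddress: 192.168.122.200
  network:
    controlPlaneLoadBalancing:
      enabled: true
      type: Keepalived
      keepalived:
        vrrpInstances:
        - virtualIPs: ["192.168.122.200/24"]
          authPass: Example
        virtualServers:
        - ipAddress: "192.168.122.200"
```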
@@ -115,51 +118,295 @@ spec: address: controller-0.k0s.lab user: root keyPath: ~/.ssh/id_rsa - k0sBinaryPath: /opt/k0s - uploadBinary: true + installFlags: + - --disable-components=endpoint-reconciler - role: controller ssh: address: controller-1.k0s.lab user: root keyPath: ~/.ssh/id_rsa - k0sBinaryPath: /opt/k0s - uploadBinary: true + installFlags: + - --disable-components=endpoint-reconciler - role: controller ssh: address: controller-2.k0s.lab user: root keyPath: ~/.ssh/id_rsa - k0sBinaryPath: /opt/k0s - uploadBinary: true + installFlags: + - --disable-components=endpoint-reconciler - role: worker ssh: address: worker-0.k0s.lab user: root keyPath: ~/.ssh/id_rsa - k0sBinaryPath: /opt/k0s - uploadBinary: true - role: worker ssh: address: worker-1.k0s.lab user: root keyPath: ~/.ssh/id_rsa - k0sBinaryPath: /opt/k0s - uploadBinary: true - role: worker ssh: address: worker-2.k0s.lab user: root keyPath: ~/.ssh/id_rsa - k0sBinaryPath: /opt/k0s - uploadBinary: true k0s: version: v{{{ extra.k8s_version }}}+k0s.0 config: spec: api: - sans: - - 192.168.122.200 + externalAddress: 192.168.122.200 network: + controlPlaneLoadBalancing: + enabled: true + type: Keepalived + keepalived: + vrrpInstances: + - virtualIPs: ["192.168.122.200/24"] + authPass: Example + virtualServers: + - ipAddress: "192.168.122.200" +``` + +Save the above configuration into a file called `k0sctl.yaml` and apply it in +order to bootstrap the cluster: + +```console +$ k0sctl apply +⠀⣿⣿⡇⠀⠀⢀⣴⣾⣿⠟⠁⢸⣿⣿⣿⣿⣿⣿⣿⡿⠛⠁⠀⢸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀█████████ █████████ ███ +⠀⣿⣿⡇⣠⣶⣿⡿⠋⠀⠀⠀⢸⣿⡇⠀⠀⠀⣠⠀⠀⢀⣠⡆⢸⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀███ ███ ███ +⠀⣿⣿⣿⣿⣟⠋⠀⠀⠀⠀⠀⢸⣿⡇⠀⢰⣾⣿⠀⠀⣿⣿⡇⢸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀███ ███ ███ +⠀⣿⣿⡏⠻⣿⣷⣤⡀⠀⠀⠀⠸⠛⠁⠀⠸⠋⠁⠀⠀⣿⣿⡇⠈⠉⠉⠉⠉⠉⠉⠉⠉⢹⣿⣿⠀███ ███ ███ +⠀⣿⣿⡇⠀⠀⠙⢿⣿⣦⣀⠀⠀⠀⣠⣶⣶⣶⣶⣶⣶⣿⣿⡇⢰⣶⣶⣶⣶⣶⣶⣶⣶⣾⣿⣿⠀█████████ ███ ██████████ +k0sctl Copyright 2023, k0sctl authors. +Anonymized telemetry of usage will be sent to the authors. 
+By continuing to use k0sctl you agree to these terms:
+https://k0sproject.io/licenses/eula
+level=info msg="==> Running phase: Connect to hosts"
+level=info msg="[ssh] worker-2.k0s.lab:22: connected"
+level=info msg="[ssh] controller-2.k0s.lab:22: connected"
+level=info msg="[ssh] worker-1.k0s.lab:22: connected"
+level=info msg="[ssh] worker-0.k0s.lab:22: connected"
+level=info msg="[ssh] controller-0.k0s.lab:22: connected"
+level=info msg="[ssh] controller-1.k0s.lab:22: connected"
+level=info msg="==> Running phase: Detect host operating systems"
+level=info msg="[ssh] worker-2.k0s.lab:22: is running Fedora Linux 38 (Cloud Edition)"
+level=info msg="[ssh] controller-2.k0s.lab:22: is running Fedora Linux 38 (Cloud Edition)"
+level=info msg="[ssh] controller-0.k0s.lab:22: is running Fedora Linux 38 (Cloud Edition)"
+level=info msg="[ssh] controller-1.k0s.lab:22: is running Fedora Linux 38 (Cloud Edition)"
+level=info msg="[ssh] worker-0.k0s.lab:22: is running Fedora Linux 38 (Cloud Edition)"
+level=info msg="[ssh] worker-1.k0s.lab:22: is running Fedora Linux 38 (Cloud Edition)"
+level=info msg="==> Running phase: Acquire exclusive host lock"
+level=info msg="==> Running phase: Prepare hosts"
+level=info msg="==> Running phase: Gather host facts"
+level=info msg="[ssh] worker-2.k0s.lab:22: using worker-2.k0s.lab as hostname"
+level=info msg="[ssh] controller-0.k0s.lab:22: using controller-0.k0s.lab as hostname"
+level=info msg="[ssh] controller-2.k0s.lab:22: using controller-2.k0s.lab as hostname"
+level=info msg="[ssh] controller-1.k0s.lab:22: using controller-1.k0s.lab as hostname"
+level=info msg="[ssh] worker-1.k0s.lab:22: using worker-1.k0s.lab as hostname"
+level=info msg="[ssh] worker-0.k0s.lab:22: using worker-0.k0s.lab as hostname"
+level=info msg="[ssh] worker-2.k0s.lab:22: discovered eth0 as private interface"
+level=info msg="[ssh] controller-0.k0s.lab:22: discovered eth0 as private interface"
+level=info msg="[ssh] controller-2.k0s.lab:22: discovered eth0 as private interface"
+level=info msg="[ssh] controller-1.k0s.lab:22: discovered eth0 as private interface"
+level=info msg="[ssh] worker-1.k0s.lab:22: discovered eth0 as private interface"
+level=info msg="[ssh] worker-0.k0s.lab:22: discovered eth0 as private interface"
+level=info msg="[ssh] worker-2.k0s.lab:22: discovered 192.168.122.210 as private address"
+level=info msg="[ssh] controller-0.k0s.lab:22: discovered 192.168.122.37 as private address"
+level=info msg="[ssh] controller-2.k0s.lab:22: discovered 192.168.122.87 as private address"
+level=info msg="[ssh] controller-1.k0s.lab:22: discovered 192.168.122.185 as private address"
+level=info msg="[ssh] worker-1.k0s.lab:22: discovered 192.168.122.81 as private address"
+level=info msg="[ssh] worker-0.k0s.lab:22: discovered 192.168.122.219 as private address"
+level=info msg="==> Running phase: Validate hosts"
+level=info msg="==> Running phase: Validate facts"
+level=info msg="==> Running phase: Download k0s binaries to local host"
+level=info msg="==> Running phase: Upload k0s binaries to hosts"
+level=info msg="[ssh] controller-0.k0s.lab:22: uploading k0s binary from /opt/k0s"
+level=info msg="[ssh] controller-2.k0s.lab:22: uploading k0s binary from /opt/k0s"
+level=info msg="[ssh] worker-0.k0s.lab:22: uploading k0s binary from /opt/k0s"
+level=info msg="[ssh] controller-1.k0s.lab:22: uploading k0s binary from /opt/k0s"
+level=info msg="[ssh] worker-1.k0s.lab:22: uploading k0s binary from /opt/k0s"
+level=info msg="[ssh] worker-2.k0s.lab:22: uploading k0s binary from /opt/k0s"
+level=info msg="==> Running phase: Install k0s binaries on hosts"
+level=info msg="[ssh] controller-0.k0s.lab:22: validating configuration"
+level=info msg="[ssh] controller-1.k0s.lab:22: validating configuration"
+level=info msg="[ssh] controller-2.k0s.lab:22: validating configuration"
+level=info msg="==> Running phase: Configure k0s"
+level=info msg="[ssh] controller-0.k0s.lab:22: installing new configuration"
+level=info msg="[ssh] controller-2.k0s.lab:22: installing new configuration"
+level=info msg="[ssh] controller-1.k0s.lab:22: installing new configuration"
+level=info msg="==> Running phase: Initialize the k0s cluster"
+level=info msg="[ssh] controller-0.k0s.lab:22: installing k0s controller"
+level=info msg="[ssh] controller-0.k0s.lab:22: waiting for the k0s service to start"
+level=info msg="[ssh] controller-0.k0s.lab:22: waiting for kubernetes api to respond"
+level=info msg="==> Running phase: Install controllers"
+level=info msg="[ssh] controller-2.k0s.lab:22: validating api connection to https://192.168.122.200:6443"
+level=info msg="[ssh] controller-1.k0s.lab:22: validating api connection to https://192.168.122.200:6443"
+level=info msg="[ssh] controller-0.k0s.lab:22: generating token"
+level=info msg="[ssh] controller-1.k0s.lab:22: writing join token"
+level=info msg="[ssh] controller-1.k0s.lab:22: installing k0s controller"
+level=info msg="[ssh] controller-1.k0s.lab:22: starting service"
+level=info msg="[ssh] controller-1.k0s.lab:22: waiting for the k0s service to start"
+level=info msg="[ssh] controller-1.k0s.lab:22: waiting for kubernetes api to respond"
+level=info msg="[ssh] controller-0.k0s.lab:22: generating token"
+level=info msg="[ssh] controller-2.k0s.lab:22: writing join token"
+level=info msg="[ssh] controller-2.k0s.lab:22: installing k0s controller"
+level=info msg="[ssh] controller-2.k0s.lab:22: starting service"
+level=info msg="[ssh] controller-2.k0s.lab:22: waiting for the k0s service to start"
+level=info msg="[ssh] controller-2.k0s.lab:22: waiting for kubernetes api to respond"
+level=info msg="==> Running phase: Install workers"
+level=info msg="[ssh] worker-2.k0s.lab:22: validating api connection to https://192.168.122.200:6443"
+level=info msg="[ssh] worker-1.k0s.lab:22: validating api connection to https://192.168.122.200:6443"
+level=info msg="[ssh] worker-0.k0s.lab:22: validating api connection to https://192.168.122.200:6443"
+level=info msg="[ssh] controller-0.k0s.lab:22: generating a join token for worker 1"
+level=info msg="[ssh] controller-0.k0s.lab:22: generating a join token for worker 2"
+level=info msg="[ssh] controller-0.k0s.lab:22: generating a join token for worker 3"
+level=info msg="[ssh] worker-2.k0s.lab:22: writing join token"
+level=info msg="[ssh] worker-0.k0s.lab:22: writing join token"
+level=info msg="[ssh] worker-1.k0s.lab:22: writing join token"
+level=info msg="[ssh] worker-2.k0s.lab:22: installing k0s worker"
+level=info msg="[ssh] worker-1.k0s.lab:22: installing k0s worker"
+level=info msg="[ssh] worker-0.k0s.lab:22: installing k0s worker"
+level=info msg="[ssh] worker-2.k0s.lab:22: starting service"
+level=info msg="[ssh] worker-1.k0s.lab:22: starting service"
+level=info msg="[ssh] worker-0.k0s.lab:22: starting service"
+level=info msg="[ssh] worker-2.k0s.lab:22: waiting for node to become ready"
+level=info msg="[ssh] worker-0.k0s.lab:22: waiting for node to become ready"
+level=info msg="[ssh] worker-1.k0s.lab:22: waiting for node to become ready"
+level=info msg="==> Running phase: Release exclusive host lock"
+level=info msg="==> Running phase: Disconnect from hosts"
+level=info msg="==> Finished in 2m20s"
+level=info msg="k0s cluster version v{{{ extra.k8s_version }}}+k0s.0 is now installed"
+level=info msg="Tip: To access the cluster you can now fetch the admin kubeconfig using:"
+level=info msg="     k0sctl kubeconfig"
+```
+
+The cluster should be available by now. Set up the kubeconfig file in order to
+interact with it:
+
+```shell
+k0sctl kubeconfig > k0s-kubeconfig
+export KUBECONFIG=$(pwd)/k0s-kubeconfig
+```
+
+All three worker nodes are ready:
+
+```console
+$ kubectl get nodes
+NAME               STATUS   ROLES    AGE     VERSION
+worker-0.k0s.lab   Ready    <none>   8m51s   v{{{ extra.k8s_version }}}+k0s
+worker-1.k0s.lab   Ready    <none>   8m51s   v{{{ extra.k8s_version }}}+k0s
+worker-2.k0s.lab   Ready    <none>   8m51s   v{{{ extra.k8s_version }}}+k0s
+```
+
+Each controller node has a dummy interface holding the VIP with a /32 prefix,
+but only one of them has the VIP on the real NIC:
+
+```console
+$ for i in controller-{0..2} ; do echo $i ; ssh $i -- ip -4 --oneline addr show | grep -e eth0 -e dummyvip0; done
+controller-0
+2: eth0 inet 192.168.122.37/24 brd 192.168.122.255 scope global dynamic noprefixroute eth0\ valid_lft 2381sec preferred_lft 2381sec
+2: eth0 inet 192.168.122.200/24 scope global secondary eth0\ valid_lft forever preferred_lft forever
+3: dummyvip0 inet 192.168.122.200/32 scope global dummyvip0\ valid_lft forever preferred_lft forever
+controller-1
+2: eth0 inet 192.168.122.185/24 brd 192.168.122.255 scope global dynamic noprefixroute eth0\ valid_lft 2390sec preferred_lft 2390sec
+3: dummyvip0 inet 192.168.122.200/32 scope global dummyvip0\ valid_lft forever preferred_lft forever
+controller-2
+2: eth0 inet 192.168.122.87/24 brd 192.168.122.255 scope global dynamic noprefixroute eth0\ valid_lft 2399sec preferred_lft 2399sec
+3: dummyvip0 inet 192.168.122.200/32 scope global dummyvip0\ valid_lft forever preferred_lft forever
+```
+
+The cluster is using control plane load balancing and is able to tolerate the
+outage of one controller node. Shut down the first controller to simulate a
+failure condition:
+
+```console
+$ ssh controller-0 'sudo poweroff'
+Connection to 192.168.122.37 closed by remote host.
+```
+
+Control plane load balancing provides high availability; the VIP will have
+moved to a different node:
+
+```console
+$ for i in controller-{0..2} ; do echo $i ; ssh $i -- ip -4 --oneline addr show | grep -e eth0 -e dummyvip0; done
+controller-1
+2: eth0 inet 192.168.122.185/24 brd 192.168.122.255 scope global dynamic noprefixroute eth0\ valid_lft 2173sec preferred_lft 2173sec
+2: eth0 inet 192.168.122.200/24 scope global secondary eth0\ valid_lft forever preferred_lft forever
+3: dummyvip0 inet 192.168.122.200/32 scope global dummyvip0\ valid_lft forever preferred_lft forever
+controller-2
+2: eth0 inet 192.168.122.87/24 brd 192.168.122.255 scope global dynamic noprefixroute eth0\ valid_lft 2182sec preferred_lft 2182sec
+3: dummyvip0 inet 192.168.122.200/32 scope global dummyvip0\ valid_lft forever preferred_lft forever
+
+$ for i in controller-{0..2} ; do echo $i ; ipvsadm --save -n; done
+IP Virtual Server version 1.2.1 (size=4096)
+Prot LocalAddress:Port Scheduler Flags
+  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
+TCP  192.168.122.200:6443 rr persistent 360
+  -> 192.168.122.185:6443              Route   1      0          0
+  -> 192.168.122.87:6443               Route   1      0          0
+  -> 192.168.122.37:6443               Route   1      0          0
+```
+
+And the cluster keeps working normally:
+
+```console
+$ kubectl get nodes
+NAME               STATUS   ROLES    AGE     VERSION
+worker-0.k0s.lab   Ready    <none>   8m51s   v{{{ extra.k8s_version }}}+k0s
+worker-1.k0s.lab   Ready    <none>   8m51s   v{{{ extra.k8s_version }}}+k0s
+worker-2.k0s.lab   Ready    <none>   8m51s   v{{{ extra.k8s_version }}}+k0s
+```
+
+## Full example using `k0sctl` and NLLB
+
+The following example shows a full `k0sctl` configuration file featuring three
+controllers and three workers with both control plane load balancing and
+node-local load balancing enabled.
+In this example we use 192.168.122.200 as the CPLB IP address.
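Relative to the previous example, only the cluster configuration changes; a minimal sketch of that delta (the full file follows):

```yaml
# Sketch: no spec.api.externalAddress and no disabled components here.
# Internal traffic goes through NLLB instead, so the endpoint-reconciler
# can keep running; only spec.network differs from the previous example.
spec:
  network:
    nodeLocalLoadBalancing:
      enabled: true
      type: EnvoyProxy
    controlPlaneLoadBalancing:
      enabled: true
      type: Keepalived
```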
+
+```yaml
+apiVersion: k0sctl.k0sproject.io/v1beta1
+kind: Cluster
+metadata:
+  name: k0s-cluster
+spec:
+  hosts:
+  - role: controller
+    ssh:
+      address: controller-0.k0s.lab
+      user: root
+      keyPath: ~/.ssh/id_rsa
+  - role: controller
+    ssh:
+      address: controller-1.k0s.lab
+      user: root
+      keyPath: ~/.ssh/id_rsa
+  - role: controller
+    ssh:
+      address: controller-2.k0s.lab
+      user: root
+      keyPath: ~/.ssh/id_rsa
+  - role: worker
+    ssh:
+      address: worker-0.k0s.lab
+      user: root
+      keyPath: ~/.ssh/id_rsa
+  - role: worker
+    ssh:
+      address: worker-1.k0s.lab
+      user: root
+      keyPath: ~/.ssh/id_rsa
+  - role: worker
+    ssh:
+      address: worker-2.k0s.lab
+      user: root
+      keyPath: ~/.ssh/id_rsa
+  k0s:
+    version: v{{{ extra.k8s_version }}}+k0s.0
+    config:
+      spec:
+        network:
+          nodeLocalLoadBalancing:
+            enabled: true
+            type: EnvoyProxy
           controlPlaneLoadBalancing:
             enabled: true
             type: Keepalived
@@ -168,7 +415,7 @@ spec:
             - virtualIPs: ["192.168.122.200/24"]
               authPass: Example
             virtualServers:
-            - ipAddress: "<VIP address>"
+            - ipAddress: "192.168.122.200"
 ```
 
 Save the above configuration into a file called `k0sctl.yaml` and apply it in
diff --git a/inttest/cplb/cplb_test.go b/inttest/cplb/cplb_test.go
index 477d6c66112a..3518a045e89d 100644
--- a/inttest/cplb/cplb_test.go
+++ b/inttest/cplb/cplb_test.go
@@ -33,6 +33,8 @@ type keepalivedSuite struct {
 
 const haControllerConfig = `
 spec:
+  api:
+    externalAddress: %s
   network:
     controlPlaneLoadBalancing:
       enabled: true
@@ -43,9 +45,6 @@ spec:
           authPass: "123456"
       virtualServers:
       - ipAddress: %s
-  nodeLocalLoadBalancing:
-    enabled: true
-    type: EnvoyProxy
 `
 
 // SetupTest prepares the controller and filesystem, getting it into a consistent
@@ -57,10 +56,10 @@ func (s *keepalivedSuite) TestK0sGetsUp() {
 
 	for idx := 0; idx < s.BootlooseSuite.ControllerCount; idx++ {
 		s.Require().NoError(s.WaitForSSH(s.ControllerNode(idx), 2*time.Minute, 1*time.Second))
-		s.PutFile(s.ControllerNode(idx), "/tmp/k0s.yaml", fmt.Sprintf(haControllerConfig, lb, lb))
+		s.PutFile(s.ControllerNode(idx), "/tmp/k0s.yaml", fmt.Sprintf(haControllerConfig, lb, lb, lb))
 
 		// Note that the token is intentionally empty for the first controller
-		s.Require().NoError(s.InitController(idx, "--config=/tmp/k0s.yaml", "--disable-components=metrics-server", joinToken))
+		s.Require().NoError(s.InitController(idx, "--config=/tmp/k0s.yaml", "--disable-components=metrics-server,endpoint-reconciler", joinToken))
 		s.Require().NoError(s.WaitJoinAPI(s.ControllerNode(idx)))
 
 		// With the primary controller running, create the join token for subsequent controllers.
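For reference, once `fmt.Sprintf(haControllerConfig, lb, lb, lb)` runs, the rendered test configuration looks roughly like the sketch below, assuming the suite's load balancer address resolves to 192.168.122.200. The `vrrpInstances` portion of the template is not visible in this hunk, so its shape here is an assumption:

```yaml
# Sketch of the rendered /tmp/k0s.yaml used by the integration test.
# The vrrpInstances line is assumed; that part of the template is elided.
spec:
  api:
    externalAddress: 192.168.122.200
  network:
    controlPlaneLoadBalancing:
      enabled: true
      type: Keepalived
      keepalived:
        vrrpInstances:
        - virtualIPs: ["192.168.122.200/24"]  # assumed shape of the hidden %s
          authPass: "123456"
        virtualServers:
        - ipAddress: 192.168.122.200
```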
diff --git a/pkg/apis/k0s/v1beta1/clusterconfig_types.go b/pkg/apis/k0s/v1beta1/clusterconfig_types.go
index 493d943ab656..175229025089 100644
--- a/pkg/apis/k0s/v1beta1/clusterconfig_types.go
+++ b/pkg/apis/k0s/v1beta1/clusterconfig_types.go
@@ -336,7 +336,7 @@ func (s *ClusterSpec) Validate() (errs []error) {
 	}
 
 	if s.Network != nil && s.Network.ControlPlaneLoadBalancing != nil {
-		for _, err := range s.Network.ControlPlaneLoadBalancing.Validate(s.API.ExternalAddress) {
+		for _, err := range s.Network.ControlPlaneLoadBalancing.Validate() {
 			errs = append(errs, fmt.Errorf("controlPlaneLoadBalancing: %w", err))
 		}
 	}
diff --git a/pkg/apis/k0s/v1beta1/cplb.go b/pkg/apis/k0s/v1beta1/cplb.go
index 64c7ce952d48..d75321a26afd 100644
--- a/pkg/apis/k0s/v1beta1/cplb.go
+++ b/pkg/apis/k0s/v1beta1/cplb.go
@@ -280,7 +280,7 @@ func (k *KeepalivedSpec) validateVirtualServers() []error {
 }
 
 // Validate validates the ControlPlaneLoadBalancingSpec
-func (c *ControlPlaneLoadBalancingSpec) Validate(externalAddress string) (errs []error) {
+func (c *ControlPlaneLoadBalancingSpec) Validate() (errs []error) {
 	if c == nil {
 		return nil
 	}
@@ -293,21 +293,16 @@ func (c *ControlPlaneLoadBalancingSpec) Validate(externalAddress string) (errs []error) {
 		errs = append(errs, fmt.Errorf("unsupported CPLB type: %s. Only allowed value: %s", c.Type, CPLBTypeKeepalived))
 	}
 
-	return append(errs, c.Keepalived.Validate(externalAddress)...)
+	return append(errs, c.Keepalived.Validate()...)
 }
 
 // Validate validates the KeepalivedSpec
-func (k *KeepalivedSpec) Validate(externalAddress string) (errs []error) {
+func (k *KeepalivedSpec) Validate() (errs []error) {
 	if k == nil {
 		return nil
 	}
 
 	errs = append(errs, k.validateVRRPInstances(nil)...)
 	errs = append(errs, k.validateVirtualServers()...)
 
-	// CPLB reconciler relies in watching kubernetes.default.svc endpoints
-	if externalAddress != "" && len(k.VirtualServers) > 0 {
-		errs = append(errs, errors.New(".spec.api.externalAddress and virtual servers cannot be used together"))
-	}
-
 	return errs
 }
diff --git a/pkg/component/controller/cplb_unix.go b/pkg/component/controller/cplb_unix.go
index e189bc4e1680..d21d5b01db11 100644
--- a/pkg/component/controller/cplb_unix.go
+++ b/pkg/component/controller/cplb_unix.go
@@ -45,20 +45,21 @@ import (
 
 // Keepalived is the controller for the keepalived process in the control plane load balancing
 type Keepalived struct {
-	K0sVars          *config.CfgVars
-	Config           *k0sAPI.KeepalivedSpec
-	DetailedLogging  bool
-	LogConfig        bool
-	APIPort          int
-	KubeConfigPath   string
-	keepalivedConfig *keepalivedConfig
-	uid              int
-	supervisor       *supervisor.Supervisor
-	log              *logrus.Entry
-	configFilePath   string
-	reconciler       *CPLBReconciler
-	updateCh         chan struct{}
-	reconcilerDone   chan struct{}
+	K0sVars               *config.CfgVars
+	Config                *k0sAPI.KeepalivedSpec
+	DetailedLogging       bool
+	LogConfig             bool
+	APIPort               int
+	KubeConfigPath        string
+	HasEndpointReconciler bool
+	keepalivedConfig      *keepalivedConfig
+	uid                   int
+	supervisor            *supervisor.Supervisor
+	log                   *logrus.Entry
+	configFilePath        string
+	reconciler            *CPLBReconciler
+	updateCh              chan struct{}
+	reconcilerDone        chan struct{}
 }
 
 // Init extracts the needed binaries and creates the directories
@@ -92,6 +93,9 @@ func (k *Keepalived) Start(_ context.Context) error {
 	}
 
 	if len(k.Config.VirtualServers) > 0 {
+		if k.HasEndpointReconciler {
+			return errors.New("virtual servers are not supported with the endpoint-reconciler enabled")
+		}
 		k.log.Info("Starting CPLB reconciler")
 		updateCh := make(chan struct{}, 1)
 		k.reconciler = NewCPLBReconciler(k.KubeConfigPath, updateCh)
diff --git a/pkg/component/controller/cplb_windows.go b/pkg/component/controller/cplb_windows.go
index 93f023251030..9141544fd161 100644
--- a/pkg/component/controller/cplb_windows.go
+++ b/pkg/component/controller/cplb_windows.go
@@ -27,12 +27,13 @@ import (
 
 // Keepalived doesn't work on Windows, so we cannot implement it at all.
 // Just create the interface so that the CI doesn't complain.
 type Keepalived struct {
-	K0sVars         *config.CfgVars
-	Config          *k0sAPI.KeepalivedSpec
-	DetailedLogging bool
-	LogConfig       bool
-	APIPort         int
-	KubeConfigPath  string
+	K0sVars               *config.CfgVars
+	Config                *k0sAPI.KeepalivedSpec
+	DetailedLogging       bool
+	LogConfig             bool
+	APIPort               int
+	KubeConfigPath        string
+	HasEndpointReconciler bool
 }
 
 func (k *Keepalived) Init(_ context.Context) error {
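Summing up the behavior change: a configuration like the sketch below (addresses are assumed examples) now passes static validation, but `(*Keepalived).Start` rejects it at runtime unless the controller runs with `--disable-components=endpoint-reconciler`:

```yaml
# Sketch: accepted by Validate() after this patch. At startup,
# HasEndpointReconciler is true (component not disabled + externalAddress
# set), so Start() returns an error until endpoint-reconciler is disabled.
spec:
  api:
    externalAddress: 192.168.122.200   # assumed example address
  network:
    controlPlaneLoadBalancing:
      enabled: true
      keepalived:
        vrrpInstances:
        - virtualIPs: ["192.168.122.200/24"]
          authPass: Example
        virtualServers:
        - ipAddress: "192.168.122.200"
```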