From 8142d1803603130b17d40d7ac35bd464aba0239f Mon Sep 17 00:00:00 2001
From: Alexey Makhov
Date: Tue, 19 Sep 2023 15:27:56 +0300
Subject: [PATCH] Client connection tunneling prototype. Proxy mode

Signed-off-by: Alexey Makhov
---
 .github/workflows/go.yml                      |   1 +
 api/bootstrap/v1beta1/k0s_types.go            |   5 +
 ...cluster.x-k8s.io_k0scontrollerconfigs.yaml |   8 +
 ...ane.cluster.x-k8s.io_k0scontrolplanes.yaml |   8 +
 .../controlplane_bootstrap_controller.go      |  17 +-
 .../k0s_controlplane_controller.go            |  60 +++-
 internal/controller/controlplane/util.go      |  67 +++-
 inttest/Makefile.variables                    |   1 +
 ...ontrolplane_docker_tunneling_proxy_test.go | 318 ++++++++++++++++++
 ...capi_controlplane_docker_tunneling_test.go |   2 +-
 10 files changed, 479 insertions(+), 8 deletions(-)
 create mode 100644 inttest/capi-controlplane-docker-tunneling-proxy/capi_controlplane_docker_tunneling_proxy_test.go

diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 4dc3d5d48..559ba59a9 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -107,6 +107,7 @@ jobs:
           - check-capi-controlplane-docker
           - check-capi-controlplane-docker-downscaling
           - check-capi-controlplane-docker-tunneling
+          - check-capi-controlplane-docker-tunneling-proxy
           - check-capi-controlplane-docker-worker
     steps:
       - name: Check out code into the Go module directory
diff --git a/api/bootstrap/v1beta1/k0s_types.go b/api/bootstrap/v1beta1/k0s_types.go
index 6854f51ca..8837d8033 100644
--- a/api/bootstrap/v1beta1/k0s_types.go
+++ b/api/bootstrap/v1beta1/k0s_types.go
@@ -198,4 +198,9 @@ type TunnelingSpec struct {
 	//+kubebuilder:validation:Optional
 	//+kubebuilder:default=31443
 	TunnelingNodePort int32 `json:"tunnelingNodePort,omitempty"`
+	// Mode describes tunneling mode.
+	// If empty, k0smotron will use the default one.
+	//+kubebuilder:validation:Enum=tunnel;proxy
+	//+kubebuilder:default=tunnel
+	Mode string `json:"mode,omitempty"`
 }
diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml
index 257013bca..864cb4ed3 100644
--- a/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml
+++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml
@@ -88,6 +88,14 @@ spec:
                 default: false
                 description: Enabled specifies whether tunneling is enabled.
                 type: boolean
+              mode:
+                default: tunnel
+                description: Mode describes tunneling mode. If empty, k0smotron
+                  will use the default one.
+                enum:
+                - tunnel
+                - proxy
+                type: string
               serverAddress:
                 description: Server address of the tunneling server. If empty,
                   k0smotron will try to detect worker node address for.
diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml
index 8e5839982..f87ce3ee4 100644
--- a/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml
+++ b/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml
@@ -90,6 +90,14 @@ spec:
                 default: false
                 description: Enabled specifies whether tunneling is enabled.
                 type: boolean
+              mode:
+                default: tunnel
+                description: Mode describes tunneling mode. If empty, k0smotron
+                  will use the default one.
+                enum:
+                - tunnel
+                - proxy
+                type: string
               serverAddress:
                 description: Server address of the tunneling server. If empty,
                   k0smotron will try to detect worker node address for.
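
For reference, the field added above is consumed through the tunneling block of K0sConfigSpec. A minimal K0sControlPlane opting into the new mode would look roughly like this (a sketch: the name, address, and port are illustrative; serverAddress and tunnelingNodePort keep their existing defaults when omitted):

apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: K0sControlPlane
metadata:
  name: example
spec:
  replicas: 1
  k0sConfigSpec:
    tunneling:
      enabled: true
      mode: proxy              # new field; defaults to "tunnel" when omitted
      serverAddress: 172.17.0.3
      tunnelingNodePort: 31443
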
diff --git a/internal/controller/bootstrap/controlplane_bootstrap_controller.go b/internal/controller/bootstrap/controlplane_bootstrap_controller.go
index 319e4ce07..eb549b641 100644
--- a/internal/controller/bootstrap/controlplane_bootstrap_controller.go
+++ b/internal/controller/bootstrap/controlplane_bootstrap_controller.go
@@ -328,6 +328,19 @@ func (c *ControlPlaneController) genTunnelingFiles(ctx context.Context, scope *S
 	}
 	frpToken := string(frpSecret.Data["value"])
 
+	var modeConfig string
+	if kcs.Spec.Tunneling.Mode == "proxy" {
+		modeConfig = fmt.Sprintf(`
+    type = tcpmux
+    custom_domains = %s
+    multiplexer = httpconnect
+`, scope.Cluster.Spec.ControlPlaneEndpoint.Host)
+	} else {
+		modeConfig = `
+    remote_port = 6443
+`
+	}
+
 	tunnelingResources := `
 ---
 apiVersion: v1
@@ -347,7 +360,7 @@ data:
     type = tcp
     local_ip = 10.96.0.1
    local_port = 443
-    remote_port = 6443
+    %s
 ---
 apiVersion: apps/v1
 kind: Deployment
@@ -384,7 +397,7 @@ spec:
 	return []cloudinit.File{{
 		Path:        "/var/lib/k0s/manifests/k0smotron-tunneling/manifest.yaml",
 		Permissions: "0644",
-		Content:     fmt.Sprintf(tunnelingResources, kcs.Spec.Tunneling.ServerAddress, kcs.Spec.Tunneling.ServerNodePort, frpToken),
+		Content:     fmt.Sprintf(tunnelingResources, kcs.Spec.Tunneling.ServerAddress, kcs.Spec.Tunneling.ServerNodePort, frpToken, modeConfig),
 	}}, nil
 }
diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go
index 5da4ce97c..e1281a8f5 100644
--- a/internal/controller/controlplane/k0s_controlplane_controller.go
+++ b/internal/controller/controlplane/k0s_controlplane_controller.go
@@ -128,7 +128,7 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct
 
 }
 
-func (c *K0sController) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster) error {
+func (c *K0sController) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) error {
 	if cluster.Spec.ControlPlaneEndpoint.IsZero() {
 		return errors.New("control plane endpoint is not set")
 	}
@@ -142,11 +142,53 @@ func (c *K0sController) reconcileKubeconfig(ctx context.Context, cluster *cluste
 		return err
 	}
 
+	if kcp.Spec.K0sConfigSpec.Tunneling.Enabled {
+		if kcp.Spec.K0sConfigSpec.Tunneling.Mode == "proxy" {
+			secretName := secret.Name(cluster.Name+"-proxied", secret.Kubeconfig)
+			err := c.Client.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: secretName}, &corev1.Secret{})
+			if err != nil {
+				if !apierrors.IsNotFound(err) {
+					return err
+				}
+				kc, err := c.generateKubeconfig(ctx, cluster, fmt.Sprintf("https://%s", cluster.Spec.ControlPlaneEndpoint.String()))
+				if err != nil {
+					return err
+				}
+
+				for cn := range kc.Clusters {
+					kc.Clusters[cn].ProxyURL = fmt.Sprintf("http://%s:%d", kcp.Spec.K0sConfigSpec.Tunneling.ServerAddress, kcp.Spec.K0sConfigSpec.Tunneling.TunnelingNodePort)
+				}
+
+				err = c.createKubeconfigSecret(ctx, kc, cluster, secretName)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			secretName := secret.Name(cluster.Name+"-tunneled", secret.Kubeconfig)
+			err := c.Client.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: secretName}, &corev1.Secret{})
+			if err != nil {
+				if !apierrors.IsNotFound(err) {
+					return err
+				}
+				kc, err := c.generateKubeconfig(ctx, cluster, fmt.Sprintf("https://%s:%d", kcp.Spec.K0sConfigSpec.Tunneling.ServerAddress, kcp.Spec.K0sConfigSpec.Tunneling.TunnelingNodePort))
+				if err != nil {
+					return err
+				}
+
+				err = c.createKubeconfigSecret(ctx, kc, cluster, secretName)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+
 	return nil
 }
 
 func (c *K0sController) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (ctrl.Result, error) {
-	err := c.reconcileKubeconfig(ctx, cluster)
+	err := c.reconcileKubeconfig(ctx, cluster, kcp)
 	if err != nil {
 		return ctrl.Result{}, fmt.Errorf("error reconciling kubeconfig secret: %w", err)
 	}
@@ -276,12 +318,24 @@ func (c *K0sController) reconcileTunneling(ctx context.Context, cluster *cluster
 		return fmt.Errorf("error creating FRP token secret: %w", err)
 	}
 
-	frpsConfig := `
+	var frpsConfig string
+	if kcp.Spec.K0sConfigSpec.Tunneling.Mode == "proxy" {
+		frpsConfig = `
+[common]
+bind_port = 7000
+tcpmux_httpconnect_port = 6443
+authentication_method = token
+token = ` + frpToken + `
+`
+	} else {
+		frpsConfig = `
 [common]
 bind_port = 7000
 authentication_method = token
 token = ` + frpToken + `
 `
+	}
+
 	frpsCMName := kcp.GetName() + "-frps-config"
 	cm := corev1.ConfigMap{
 		TypeMeta: metav1.TypeMeta{
diff --git a/internal/controller/controlplane/util.go b/internal/controller/controlplane/util.go
index 0e846678b..75ad49ebd 100644
--- a/internal/controller/controlplane/util.go
+++ b/internal/controller/controlplane/util.go
@@ -2,11 +2,21 @@ package controlplane
 
 import (
 	"context"
+	"fmt"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util"
-	cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1"
-
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/clientcmd/api"
+	"sigs.k8s.io/cluster-api/util/certs"
+	"sigs.k8s.io/cluster-api/util/kubeconfig"
+	"sigs.k8s.io/cluster-api/util/secret"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1"
 )
 
 func (c *K0sController) getMachineTemplate(ctx context.Context, kcp *cpv1beta1.K0sControlPlane) (*unstructured.Unstructured, error) {
@@ -25,3 +35,56 @@ func (c *K0sController) getMachineTemplate(ctx context.Context, kcp *cpv1beta1.K
 	}
 	return machineTemplate, nil
 }
+
+func (c *K0sController) generateKubeconfig(ctx context.Context, cluster *clusterv1.Cluster, endpoint string) (*api.Config, error) {
+	clusterName := util.ObjectKey(cluster)
+	clusterCA, err := secret.GetFromNamespacedName(ctx, c.Client, clusterName, secret.ClusterCA)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, kubeconfig.ErrDependentCertificateNotFound
+		}
+		return nil, err
+	}
+
+	cert, err := certs.DecodeCertPEM(clusterCA.Data[secret.TLSCrtDataName])
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode CA Cert: %w", err)
+	} else if cert == nil {
+		return nil, fmt.Errorf("certificate not found in config")
+	}
+
+	key, err := certs.DecodePrivateKeyPEM(clusterCA.Data[secret.TLSKeyDataName])
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode private key: %w", err)
+	} else if key == nil {
+		return nil, fmt.Errorf("CA private key not found")
+	}
+
+	cfg, err := kubeconfig.New(clusterName.Name, endpoint, cert, key)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate a kubeconfig: %w", err)
+	}
+
+	return cfg, nil
+}
+
+func (c *K0sController) createKubeconfigSecret(ctx context.Context, cfg *api.Config, cluster *clusterv1.Cluster, secretName string) error {
+	cfgBytes, err := clientcmd.Write(*cfg)
+	if err != nil {
+		return fmt.Errorf("failed to serialize config to yaml: %w", err)
to yaml: %w", err) + } + + clusterName := util.ObjectKey(cluster) + owner := metav1.OwnerReference{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: cluster.Name, + UID: cluster.UID, + } + + kcSecret := kubeconfig.GenerateSecretWithOwner(clusterName, cfgBytes, owner) + kcSecret.Name = secretName + + return c.Create(ctx, kcSecret) +} diff --git a/inttest/Makefile.variables b/inttest/Makefile.variables index 7032546a9..c69ce4d59 100644 --- a/inttest/Makefile.variables +++ b/inttest/Makefile.variables @@ -15,5 +15,6 @@ smoketests := \ check-capi-controlplane-docker-downscaling \ check-capi-controlplane-docker-worker \ check-capi-controlplane-docker-tunneling \ + check-capi-controlplane-docker-tunneling-proxy \ check-monitoring \ check-capi-docker-machinedeployment \ diff --git a/inttest/capi-controlplane-docker-tunneling-proxy/capi_controlplane_docker_tunneling_proxy_test.go b/inttest/capi-controlplane-docker-tunneling-proxy/capi_controlplane_docker_tunneling_proxy_test.go new file mode 100644 index 000000000..af6dde2e6 --- /dev/null +++ b/inttest/capi-controlplane-docker-tunneling-proxy/capi_controlplane_docker_tunneling_proxy_test.go @@ -0,0 +1,318 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package capicontolplanedockertunneling + +import ( + "context" + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "net/http" + "net/url" + "os" + "os/exec" + "strconv" + "strings" + "testing" + "time" + + k0stestutil "github.com/k0sproject/k0s/inttest/common" + "github.com/k0sproject/k0smotron/inttest/util" + + "github.com/stretchr/testify/suite" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +type CAPIControlPlaneDockerSuite struct { + suite.Suite + client *kubernetes.Clientset + restConfig *rest.Config + clusterYamlsPath string + ctx context.Context +} + +func TestCAPIControlPlaneDockerSuite(t *testing.T) { + s := CAPIControlPlaneDockerSuite{} + suite.Run(t, &s) +} + +func (s *CAPIControlPlaneDockerSuite) SetupSuite() { + kubeConfigPath := os.Getenv("KUBECONFIG") + s.Require().NotEmpty(kubeConfigPath, "KUBECONFIG env var must be set and point to kind cluster") + // Get kube client from kubeconfig + restCfg, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath) + s.Require().NoError(err) + s.Require().NotNil(restCfg) + s.restConfig = restCfg + + // Get kube client from kubeconfig + kubeClient, err := kubernetes.NewForConfig(restCfg) + s.Require().NoError(err) + s.Require().NotNil(kubeClient) + s.client = kubeClient + + tmpDir := s.T().TempDir() + s.clusterYamlsPath = tmpDir + "/cluster.yaml" + s.Require().NoError(os.WriteFile(s.clusterYamlsPath, []byte(dockerClusterYaml), 0644)) + + s.ctx, _ = util.NewSuiteContext(s.T()) +} + +func (s *CAPIControlPlaneDockerSuite) TestCAPIControlPlaneDocker() { + + // Apply the child cluster objects + s.applyClusterObjects() + defer func() { + keep := 
os.Getenv("KEEP_AFTER_TEST") + if keep == "true" { + return + } + if keep == "on-failure" && s.T().Failed() { + return + } + s.T().Log("Deleting cluster objects") + s.deleteCluster() + }() + s.T().Log("cluster objects applied, waiting for cluster to be ready") + + var localPort int + // nolint:staticcheck + err := wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) { + localPort, _ = getLBPort("docker-test-cluster-lb") + return localPort > 0, nil + }) + s.Require().NoError(err) + + s.T().Log("waiting to see admin kubeconfig secret") + kmcKC, err := util.GetKMCClientSet(s.ctx, s.client, "docker-test-cluster", "default", localPort) + s.Require().NoError(err) + + // nolint:staticcheck + err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) { + b, _ := s.client.RESTClient(). + Get(). + AbsPath("/healthz"). + DoRaw(context.Background()) + + return string(b) == "ok", nil + }) + s.Require().NoError(err) + + // nolint:staticcheck + err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) { + output, err := exec.Command("docker", "exec", "docker-test-cluster-docker-test-0", "k0s", "status").Output() + if err != nil { + return false, nil + } + + return strings.Contains(string(output), "Version:"), nil + }) + s.Require().NoError(err) + + s.T().Log("waiting for node to be ready") + s.Require().NoError(k0stestutil.WaitForNodeReadyStatus(s.ctx, kmcKC, "docker-test-cluster-docker-test-worker-0", corev1.ConditionTrue)) + + s.T().Log("waiting for frp server to be ready") + s.Require().NoError(k0stestutil.WaitForDeployment(s.ctx, s.client, "docker-test-frps", "default")) + + s.T().Log("waiting for frp client to be ready") + s.Require().NoError(k0stestutil.WaitForDeployment(s.ctx, kmcKC, "frpc", "kube-system")) + + s.T().Log("checking connectivity to the child cluster via tunnel") + + forwardedPort := 31443 + + tunneledKmcKC, err := GetKMCClientSetWithProxy(s.ctx, s.client, "docker-test-cluster-proxied", "default", forwardedPort) + s.Require().NoError(err) + + s.T().Log("check for node to be ready via tunnel") + _, err = tunneledKmcKC.RESTClient(). + Get(). + AbsPath("/healthz"). 
+		DoRaw(context.Background())
+	s.Require().NoError(err)
+
+	s.Require().NoError(k0stestutil.WaitForNodeReadyStatus(s.ctx, tunneledKmcKC, "docker-test-cluster-docker-test-worker-0", corev1.ConditionTrue))
+
+	s.Require().NoError(k0stestutil.WaitForDeployment(s.ctx, tunneledKmcKC, "frpc", "kube-system"))
+}
+
+func GetKMCClientSetWithProxy(ctx context.Context, kc *kubernetes.Clientset, name string, namespace string, port int) (*kubernetes.Clientset, error) {
+	secretName := fmt.Sprintf("%s-kubeconfig", name)
+	// Wait first to see the secret exists
+	if err := util.WaitForSecret(ctx, kc, secretName, namespace); err != nil {
+		return nil, err
+	}
+	kubeConf, err := kc.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	kmcCfg, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeConf.Data["value"]))
+	if err != nil {
+		return nil, err
+	}
+
+	// Send all API requests through the locally forwarded frp proxy port
+	proxyURL, _ := url.Parse(fmt.Sprintf("http://localhost:%d", port))
+	kmcCfg.Proxy = http.ProxyURL(proxyURL)
+
+	return kubernetes.NewForConfig(kmcCfg)
+}
+
+func (s *CAPIControlPlaneDockerSuite) applyClusterObjects() {
+	// Exec via kubectl
+	out, err := exec.Command("kubectl", "apply", "-f", s.clusterYamlsPath).CombinedOutput()
+	s.Require().NoError(err, "failed to apply cluster objects: %s", string(out))
+}
+
+func (s *CAPIControlPlaneDockerSuite) deleteCluster() {
+	// Exec via kubectl
+	out, err := exec.Command("kubectl", "delete", "-f", s.clusterYamlsPath).CombinedOutput()
+	s.Require().NoError(err, "failed to delete cluster objects: %s", string(out))
+}
+
+func getLBPort(name string) (int, error) {
+	b, err := exec.Command("docker", "inspect", name, "--format", "{{json .NetworkSettings.Ports}}").Output()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get inspect info from container %s: %w", name, err)
+	}
+
+	var ports map[string][]map[string]string
+	err = json.Unmarshal(b, &ports)
+	if err != nil {
+		return 0, fmt.Errorf("failed to unmarshal inspect info from container %s: %w", name, err)
+	}
+
+	return strconv.Atoi(ports["6443/tcp"][0]["HostPort"])
+}
+
+var dockerClusterYaml = `
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: docker-test-cluster
+  namespace: default
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+      - 192.168.0.0/16
+    serviceDomain: cluster.local
+    services:
+      cidrBlocks:
+      - 10.128.0.0/12
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: K0sControlPlane
+    name: docker-test
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: DockerCluster
+    name: docker-test
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: docker-test-cp-template
+  namespace: default
+spec:
+  template:
+    spec: {}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: K0sControlPlane
+metadata:
+  name: docker-test
+spec:
+  replicas: 1
+  k0sConfigSpec:
+    tunneling:
+      enabled: true
+      mode: proxy
+    k0s:
+      apiVersion: k0s.k0sproject.io/v1beta1
+      kind: ClusterConfig
+      metadata:
+        name: k0s
+      spec:
+        api:
+          extraArgs:
+            anonymous-auth: "true"
+        telemetry:
+          enabled: false
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: DockerMachineTemplate
+      name: docker-test-cp-template
+      namespace: default
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerCluster
+metadata:
+  name: docker-test
+  namespace: default
+spec:
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Machine
+metadata:
+  name: docker-test-worker-0
+  namespace: default
+spec:
+  version: v1.27.1
+  clusterName: docker-test-cluster
+  bootstrap:
+    configRef:
+      apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+      kind: K0sWorkerConfig
+      name: docker-test-worker-0
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: DockerMachine
+    name: docker-test-worker-0
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: K0sWorkerConfig
+metadata:
+  name: docker-test-worker-0
+  namespace: default
+spec:
+  # version is deliberately different to be able to verify we actually pick it up :)
+  version: v1.27.1+k0s.0
+  args:
+    - --labels=k0sproject.io/foo=bar
+  preStartCommands:
+    - echo -n "pre-start" > /tmp/pre-start
+  postStartCommands:
+    - echo -n "post-start" > /tmp/post-start
+  files:
+    - path: /tmp/test-file
+      content: test-file
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachine
+metadata:
+  name: docker-test-worker-0
+  namespace: default
+spec:
+`
diff --git a/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go b/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go
index 205d35f7c..709374a64 100644
--- a/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go
+++ b/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go
@@ -162,7 +162,7 @@ func (s *CAPIControlPlaneDockerSuite) TestCAPIControlPlaneDocker() {
 	})
 	s.Require().NoError(err)
 
-	tunneledKmcKC, err := util.GetKMCClientSet(s.ctx, s.client, "docker-test-cluster", "default", forwardedPort)
+	tunneledKmcKC, err := util.GetKMCClientSet(s.ctx, s.client, "docker-test-cluster-tunneled", "default", forwardedPort)
 	s.Require().NoError(err)
 
 	s.T().Log("check for node to be ready via tunnel")
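
For context on what proxy mode ultimately produces: the "<cluster-name>-proxied" kubeconfig written by reconcileKubeconfig carries the frp endpoint in the standard client-go proxy-url field of each cluster entry, so clients issue an HTTP CONNECT to the frps tcpmux listener and then speak TLS to the API server through it. The secret's value would look roughly like this (a sketch: the addresses and the CA placeholder are illustrative):

apiVersion: v1
kind: Config
clusters:
- name: docker-test-cluster
  cluster:
    certificate-authority-data: <base64 CA bundle>
    server: https://172.18.0.5:6443       # cluster's ControlPlaneEndpoint
    proxy-url: http://172.17.0.3:31443    # tunneling serverAddress:tunnelingNodePort
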