From bbeb159570d9b0744308d3199fe4d69dff898d91 Mon Sep 17 00:00:00 2001 From: Alexey Makhov Date: Wed, 13 Sep 2023 14:30:21 +0300 Subject: [PATCH 1/4] Client connection tunneling prototype Signed-off-by: Alexey Makhov # Conflicts: # .github/workflows/go.yml # inttest/Makefile.variables --- .github/workflows/go.yml | 4 +- api/bootstrap/v1beta1/k0s_types.go | 15 + .../v1beta1/zz_generated.deepcopy.go | 16 + api/controlplane/v1beta1/k0s_types.go | 2 +- ...cluster.x-k8s.io_k0scontrollerconfigs.yaml | 13 + ...ane.cluster.x-k8s.io_k0scontrolplanes.yaml | 13 + config/rbac/role.yaml | 12 + config/samples/capi/docker/kind.yaml | 5 +- .../controlplane_bootstrap_controller.go | 63 ++++ internal/controller/controlplane/helper.go | 14 +- .../k0s_controlplane_controller.go | 174 +++++++++- .../k0smotroncluster_controller.go | 1 - .../k0smotron.io/k0smotroncluster_service.go | 27 +- .../k0smotroncluster_service_test.go | 106 ------ internal/controller/util/nodes.go | 29 ++ internal/controller/util/nodes_test.go | 112 +++++++ inttest/Makefile | 1 + inttest/Makefile.variables | 1 + ...capi_controlplane_docker_tunneling_test.go | 310 ++++++++++++++++++ 19 files changed, 772 insertions(+), 146 deletions(-) create mode 100644 internal/controller/util/nodes.go create mode 100644 internal/controller/util/nodes_test.go create mode 100644 inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 93ea46c58..4dc3d5d48 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -106,6 +106,7 @@ jobs: - check-capi-docker-machinedeployment - check-capi-controlplane-docker - check-capi-controlplane-docker-downscaling + - check-capi-controlplane-docker-tunneling - check-capi-controlplane-docker-worker steps: - name: Check out code into the Go module directory @@ -158,4 +159,5 @@ jobs: run: | kind get kubeconfig > kind.conf export KUBECONFIG=$(realpath kind.conf) - KEEP_AFTER_TEST=true make -C inttest ${{ matrix.smoke-suite }} + docker system prune -f + make -C inttest ${{ matrix.smoke-suite }} diff --git a/api/bootstrap/v1beta1/k0s_types.go b/api/bootstrap/v1beta1/k0s_types.go index fff11a75b..12ad42b80 100644 --- a/api/bootstrap/v1beta1/k0s_types.go +++ b/api/bootstrap/v1beta1/k0s_types.go @@ -173,4 +173,19 @@ type K0sConfigSpec struct { // If the version field is specified, it is ignored, and whatever version is downloaded from the URL is used. // +kubebuilder:validation:Optional DownloadURL string `json:"downloadURL,omitempty"` + + // Tunneling defines the tunneling configuration for the cluster. + //+kubebuilder:validation:Optional + Tunneling TunnelingSpec `json:"tunneling,omitempty"` +} + +type TunnelingSpec struct { + // Enabled specifies whether tunneling is enabled. + //+kubebuilder:validation:Optional + //+kubebuilder:default=false + Enabled bool `json:"enabled,omitempty"` + // Server address of the tunneling server. + // If empty, k0smotron will try to detect worker node address for. 
+ //+kubebuilder:validation:Optional + ServerAddress string `json:"serverAddress,omitempty"` } diff --git a/api/bootstrap/v1beta1/zz_generated.deepcopy.go b/api/bootstrap/v1beta1/zz_generated.deepcopy.go index 50fb2decd..bc614bbf3 100644 --- a/api/bootstrap/v1beta1/zz_generated.deepcopy.go +++ b/api/bootstrap/v1beta1/zz_generated.deepcopy.go @@ -68,6 +68,7 @@ func (in *K0sConfigSpec) DeepCopyInto(out *K0sConfigSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + out.Tunneling = in.Tunneling } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K0sConfigSpec. @@ -368,3 +369,18 @@ func (in *K0sWorkerConfigTemplateSpec) DeepCopy() *K0sWorkerConfigTemplateSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TunnelingSpec) DeepCopyInto(out *TunnelingSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TunnelingSpec. +func (in *TunnelingSpec) DeepCopy() *TunnelingSpec { + if in == nil { + return nil + } + out := new(TunnelingSpec) + in.DeepCopyInto(out) + return out +} diff --git a/api/controlplane/v1beta1/k0s_types.go b/api/controlplane/v1beta1/k0s_types.go index 49020ae6f..2a8ee2015 100644 --- a/api/controlplane/v1beta1/k0s_types.go +++ b/api/controlplane/v1beta1/k0s_types.go @@ -35,8 +35,8 @@ func init() { type K0sControlPlane struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec K0sControlPlaneSpec `json:"spec,omitempty"` + Spec K0sControlPlaneSpec `json:"spec,omitempty"` Status K0sControlPlaneStatus `json:"status,omitempty"` } diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml index fde625ab3..dc91248b1 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml @@ -80,6 +80,19 @@ spec: items: type: string type: array + tunneling: + description: Tunneling defines the tunneling configuration for the + cluster. + properties: + enabled: + default: false + description: Enabled specifies whether tunneling is enabled. + type: boolean + serverAddress: + description: Server address of the tunneling server. If empty, + k0smotron will try to detect worker node address for. + type: string + type: object version: description: 'Version is the version of k0s to use. In case this is not set, the latest version is used. Make sure the version is compatible diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml index 1e7b1e810..9165f2330 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml @@ -82,6 +82,19 @@ spec: items: type: string type: array + tunneling: + description: Tunneling defines the tunneling configuration for + the cluster. + properties: + enabled: + default: false + description: Enabled specifies whether tunneling is enabled. + type: boolean + serverAddress: + description: Server address of the tunneling server. If empty, + k0smotron will try to detect worker node address for. + type: string + type: object version: description: 'Version is the version of k0s to use. In case this is not set, the latest version is used. 
Make sure the version diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 90784733e..7897507bd 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -18,6 +18,18 @@ rules: - patch - update - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - apps resources: diff --git a/config/samples/capi/docker/kind.yaml b/config/samples/capi/docker/kind.yaml index 3681a9b63..4a494add1 100644 --- a/config/samples/capi/docker/kind.yaml +++ b/config/samples/capi/docker/kind.yaml @@ -6,4 +6,7 @@ nodes: - role: control-plane extraMounts: - hostPath: /var/run/docker.sock - containerPath: /var/run/docker.sock \ No newline at end of file + containerPath: /var/run/docker.sock + extraPortMappings: + - containerPort: 31443 + hostPort: 31443 diff --git a/internal/controller/bootstrap/controlplane_bootstrap_controller.go b/internal/controller/bootstrap/controlplane_bootstrap_controller.go index e4e89e1e9..bb0aa8ae3 100644 --- a/internal/controller/bootstrap/controlplane_bootstrap_controller.go +++ b/internal/controller/bootstrap/controlplane_bootstrap_controller.go @@ -63,6 +63,7 @@ const joinTokenFilePath = "/etc/k0s.token" // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machines;machines/status,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete func (c *ControlPlaneController) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { log := log.FromContext(ctx).WithValues("K0sControllerConfig", req.NamespacedName) @@ -171,6 +172,9 @@ func (c *ControlPlaneController) Reconcile(ctx context.Context, req ctrl.Request } installCmd = createCPInstallCmdWithJoinToken(config, joinTokenFilePath) } + if config.Spec.Tunneling.Enabled { + files = append(files, c.genTunnelingFiles(ctx, scope, config)...) + } files = append(files, config.Spec.Files...) 
downloadCommands := createCPDownloadCommands(config) @@ -300,6 +304,65 @@ func (c *ControlPlaneController) genControlPlaneJoinFiles(ctx context.Context, s return files, err } +func (c *ControlPlaneController) genTunnelingFiles(_ context.Context, _ *Scope, kcs *bootstrapv1.K0sControllerConfig) []cloudinit.File { + tunnelingResources := ` +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: frpc-config + namespace: kube-system +data: + frpc.ini: | + [common] + server_addr = %s + server_port = 31700 + + [kube-apiserver] + type = tcp + local_ip = 10.96.0.1 + local_port = 443 + remote_port = 6443 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frpc + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: frpc + template: + metadata: + labels: + app: frpc + spec: + containers: + - name: frpc + image: snowdreamtech/frpc:0.51.3 + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: frpc-config + mountPath: /etc/frp/frpc.ini + subPath: frpc.ini + volumes: + - name: frpc-config + configMap: + name: frpc-config + items: + - key: frpc.ini + path: frpc.ini + +` + return []cloudinit.File{{ + Path: "/var/lib/k0s/manifests/k0smotron-tunneling/manifest.yaml", + Permissions: "0644", + Content: fmt.Sprintf(tunnelingResources, kcs.Spec.Tunneling.ServerAddress), + }} +} + func (c *ControlPlaneController) getCerts(ctx context.Context, scope *Scope) ([]cloudinit.File, *secret.Certificate, error) { var files []cloudinit.File certificates := secret.NewCertificatesForInitialControlPlane(&kubeadmbootstrapv1.ClusterConfiguration{ diff --git a/internal/controller/controlplane/helper.go b/internal/controller/controlplane/helper.go index 5ccb17d86..132fef0c0 100644 --- a/internal/controller/controlplane/helper.go +++ b/internal/controller/controlplane/helper.go @@ -3,6 +3,7 @@ package controlplane import ( "context" "fmt" + "k8s.io/utils/pointer" "strings" "github.com/Masterminds/semver" @@ -74,14 +75,15 @@ func (c *K0sController) generateMachine(_ context.Context, name string, cluster } } -func (c *K0sController) createMachineFromTemplate(ctx context.Context, name string, _ *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (*unstructured.Unstructured, error) { - machineFromTemplate, err := c.generateMachineFromTemplate(ctx, name, kcp) +func (c *K0sController) createMachineFromTemplate(ctx context.Context, name string, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (*unstructured.Unstructured, error) { + machineFromTemplate, err := c.generateMachineFromTemplate(ctx, name, cluster, kcp) if err != nil { return nil, err } if err = c.Client.Patch(ctx, machineFromTemplate, client.Apply, &client.PatchOptions{ FieldManager: "k0smotron", + Force: pointer.Bool(true), }); err != nil { return nil, err } @@ -89,8 +91,8 @@ func (c *K0sController) createMachineFromTemplate(ctx context.Context, name stri return machineFromTemplate, nil } -func (c *K0sController) deleteMachineFromTemplate(ctx context.Context, name string, kcp *cpv1beta1.K0sControlPlane) error { - machineFromTemplate, err := c.generateMachineFromTemplate(ctx, name, kcp) +func (c *K0sController) deleteMachineFromTemplate(ctx context.Context, name string, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) error { + machineFromTemplate, err := c.generateMachineFromTemplate(ctx, name, cluster, kcp) if err != nil { return err } @@ -98,7 +100,7 @@ func (c *K0sController) deleteMachineFromTemplate(ctx context.Context, name stri return c.Client.Delete(ctx, machineFromTemplate) } -func (c *K0sController) 
generateMachineFromTemplate(ctx context.Context, name string, kcp *cpv1beta1.K0sControlPlane) (*unstructured.Unstructured, error) { +func (c *K0sController) generateMachineFromTemplate(ctx context.Context, name string, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (*unstructured.Unstructured, error) { unstructuredMachineTemplate, err := c.getMachineTemplate(ctx, kcp) if err != nil { return nil, err @@ -130,7 +132,7 @@ func (c *K0sController) generateMachineFromTemplate(ctx context.Context, name st labels[k] = v } - labels[clusterv1.ClusterNameLabel] = kcp.Name + labels[clusterv1.ClusterNameLabel] = cluster.GetName() labels[clusterv1.MachineControlPlaneLabel] = "" labels[clusterv1.MachineControlPlaneNameLabel] = kcp.Name machine.SetLabels(labels) diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go index 72d871416..5f2845bf3 100644 --- a/internal/controller/controlplane/k0s_controlplane_controller.go +++ b/internal/controller/controlplane/k0s_controlplane_controller.go @@ -20,19 +20,19 @@ import ( "context" "errors" "fmt" + "github.com/k0sproject/k0smotron/internal/controller/util" - bootstrapv1 "github.com/k0sproject/k0smotron/api/bootstrap/v1beta1" - cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" kubeadmbootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/util" capiutil "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/kubeconfig" @@ -40,6 +40,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" + + bootstrapv1 "github.com/k0sproject/k0smotron/api/bootstrap/v1beta1" + cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" ) const ( @@ -55,6 +58,7 @@ type K0sController struct { // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=k0scontrolplanes/status,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=k0scontrolplanes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;update;patch @@ -102,6 +106,11 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct return ctrl.Result{}, err } + if err := c.reconcileTunneling(ctx, cluster, kcp); err != nil { + log.Error(err, "Failed to reconcile tunneling") + return ctrl.Result{}, err + } + res, err = c.reconcile(ctx, cluster, kcp) if err != nil { return res, err @@ -160,7 +169,7 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv return fmt.Errorf("error deleting machine from template: %w", err) } - if err := c.deleteMachineFromTemplate(ctx, name, kcp); err != nil { + if err := c.deleteMachineFromTemplate(ctx, name, cluster, kcp); err != nil { return fmt.Errorf("error deleting 
machine from template: %w", err) } @@ -246,7 +255,162 @@ func (c *K0sController) ensureCertificates(ctx context.Context, cluster *cluster certificates := secret.NewCertificatesForInitialControlPlane(&kubeadmbootstrapv1.ClusterConfiguration{ CertificatesDir: "/var/lib/k0s/pki", }) - return certificates.LookupOrGenerate(ctx, c.Client, util.ObjectKey(cluster), *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane"))) + return certificates.LookupOrGenerate(ctx, c.Client, capiutil.ObjectKey(cluster), *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane"))) +} + +func (c *K0sController) reconcileTunneling(ctx context.Context, _ *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) error { + if !kcp.Spec.K0sConfigSpec.Tunneling.Enabled { + return nil + } + + if kcp.Spec.K0sConfigSpec.Tunneling.ServerAddress == "" { + ip, err := c.detectNodeIP(ctx, kcp) + if err != nil { + return fmt.Errorf("error detecting node IP: %w", err) + } + kcp.Spec.K0sConfigSpec.Tunneling.ServerAddress = ip + } + + frpsConfig := ` +[common] +bind_port = 7000 +` + frpsCMName := kcp.GetName() + "-frps-config" + cm := corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: frpsCMName, + Namespace: kcp.GetNamespace(), + }, + Data: map[string]string{ + "frps.ini": frpsConfig, + }, + } + + _ = ctrl.SetControllerReference(kcp, &cm, c.Scheme) + err := c.Client.Patch(ctx, &cm, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) + if err != nil { + return fmt.Errorf("error creating ConfigMap: %w", err) + } + + frpsDeployment := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: kcp.GetName() + "-frps", + Namespace: kcp.GetNamespace(), + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "k0smotron_cluster": kcp.GetName(), + "app": "frps", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "k0smotron_cluster": kcp.GetName(), + "app": "frps", + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{{ + Name: frpsCMName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: frpsCMName, + }, + Items: []corev1.KeyToPath{{ + Key: "frps.ini", + Path: "frps.ini", + }}, + }, + }, + }}, + Containers: []corev1.Container{{ + Name: "frps", + Image: "snowdreamtech/frps:0.51.3", + ImagePullPolicy: corev1.PullIfNotPresent, + Ports: []corev1.ContainerPort{ + { + Name: "api", + Protocol: corev1.ProtocolTCP, + ContainerPort: 7000, + }, + { + Name: "tunnel", + Protocol: corev1.ProtocolTCP, + ContainerPort: 6443, + }, + }, + VolumeMounts: []corev1.VolumeMount{{ + Name: frpsCMName, + MountPath: "/etc/frp/frps.ini", + SubPath: "frps.ini", + }}, + }}, + }}, + }, + } + _ = ctrl.SetControllerReference(kcp, &frpsDeployment, c.Scheme) + err = c.Client.Patch(ctx, &frpsDeployment, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) + if err != nil { + return fmt.Errorf("error creating Deployment: %w", err) + } + + frpsService := corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: kcp.GetName() + "-frps", + Namespace: kcp.GetNamespace(), + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "k0smotron_cluster": kcp.GetName(), + 
"app": "frps", + }, + Ports: []corev1.ServicePort{{ + Name: "api", + Protocol: corev1.ProtocolTCP, + Port: 7000, + TargetPort: intstr.FromInt(7000), + NodePort: 31700, + }, { + Name: "tunnel", + Protocol: corev1.ProtocolTCP, + Port: 6443, + TargetPort: intstr.FromInt(6443), + NodePort: 31443, + }}, + Type: corev1.ServiceTypeNodePort, + }, + } + _ = ctrl.SetControllerReference(kcp, &frpsService, c.Scheme) + err = c.Client.Patch(ctx, &frpsService, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) + if err != nil { + return fmt.Errorf("error creating Service: %w", err) + } + + return nil +} + +func (c *K0sController) detectNodeIP(ctx context.Context, _ *cpv1beta1.K0sControlPlane) (string, error) { + nodes, err := c.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + if err != nil { + return "", err + } + + return util.FindNodeAddress(nodes), nil } func machineName(base string, i int) string { diff --git a/internal/controller/k0smotron.io/k0smotroncluster_controller.go b/internal/controller/k0smotron.io/k0smotroncluster_controller.go index 31ef44e1e..98ca08d8a 100644 --- a/internal/controller/k0smotron.io/k0smotroncluster_controller.go +++ b/internal/controller/k0smotron.io/k0smotroncluster_controller.go @@ -60,7 +60,6 @@ type ClusterReconciler struct { // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list // +kubebuilder:rbac:groups=core,resources=pods/exec,verbs=create // +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. diff --git a/internal/controller/k0smotron.io/k0smotroncluster_service.go b/internal/controller/k0smotron.io/k0smotroncluster_service.go index beb93213c..44872b07f 100644 --- a/internal/controller/k0smotron.io/k0smotroncluster_service.go +++ b/internal/controller/k0smotron.io/k0smotroncluster_service.go @@ -19,7 +19,7 @@ package k0smotronio import ( "context" "fmt" - "math/rand" + "github.com/k0sproject/k0smotron/internal/controller/util" "time" km "github.com/k0sproject/k0smotron/api/k0smotron.io/v1beta1" @@ -145,7 +145,7 @@ func (r *ClusterReconciler) reconcileServices(ctx context.Context, kmc km.Cluste if err != nil { return err } - kmc.Spec.ExternalAddress = r.findNodeAddress(nodes) + kmc.Spec.ExternalAddress = util.FindNodeAddress(nodes) if err := r.Client.Update(ctx, &kmc); err != nil { return err } @@ -153,26 +153,3 @@ func (r *ClusterReconciler) reconcileServices(ctx context.Context, kmc km.Cluste return nil } - -// findNodeAddress returns a random node address preferring external address if one is found -func (r *ClusterReconciler) findNodeAddress(nodes *v1.NodeList) string { - extAddr, intAddr := "", "" - - // Get random node from list - node := nodes.Items[rand.Intn(len(nodes.Items))] - - for _, addr := range node.Status.Addresses { - if addr.Type == v1.NodeExternalIP { - extAddr = addr.Address - break - } - if addr.Type == v1.NodeInternalIP { - intAddr = addr.Address - } - } - - if extAddr != "" { - return extAddr - } - return intAddr -} diff --git a/internal/controller/k0smotron.io/k0smotroncluster_service_test.go b/internal/controller/k0smotron.io/k0smotroncluster_service_test.go index d9dcb0c2b..fb40852a2 100644 --- a/internal/controller/k0smotron.io/k0smotroncluster_service_test.go +++ b/internal/controller/k0smotron.io/k0smotroncluster_service_test.go @@ -21,115 
+21,9 @@ import ( km "github.com/k0sproject/k0smotron/api/k0smotron.io/v1beta1" "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func TestClusterReconciler_findNodeAddress(t *testing.T) { - - tests := []struct { - name string - nodes *v1.NodeList - want string - }{ - { - name: "when only internal is set", - nodes: &v1.NodeList{ - Items: []v1.Node{ - { - Status: v1.NodeStatus{ - Addresses: []v1.NodeAddress{ - { - Type: v1.NodeInternalIP, - Address: "1.2.3.4", - }, - }, - }, - }, - }, - }, - want: "1.2.3.4", - }, - { - name: "when only external is set", - nodes: &v1.NodeList{ - Items: []v1.Node{ - { - Status: v1.NodeStatus{ - Addresses: []v1.NodeAddress{ - { - Type: v1.NodeExternalIP, - Address: "1.2.3.4", - }, - }, - }, - }, - }, - }, - want: "1.2.3.4", - }, - { - name: "when both are set", - nodes: &v1.NodeList{ - Items: []v1.Node{ - { - Status: v1.NodeStatus{ - Addresses: []v1.NodeAddress{ - { - Type: v1.NodeExternalIP, - Address: "1.1.1.1", - }, - { - Type: v1.NodeInternalIP, - Address: "2.2.2.2", - }, - }, - }, - }, - }, - }, - want: "1.1.1.1", - }, - { - name: "when multiple addresses are set", - nodes: &v1.NodeList{ - Items: []v1.Node{ - { - Status: v1.NodeStatus{ - Addresses: []v1.NodeAddress{ - { - Type: v1.NodeInternalIP, - Address: "2.2.2.2", - }, - { - Type: v1.NodeExternalIP, - Address: "1.1.1.1", - }, - { - Type: v1.NodeInternalIP, - Address: "3.3.3.3", - }, - { - Type: v1.NodeExternalIP, - Address: "4.4.4.4", - }, - }, - }, - }, - }, - }, - want: "1.1.1.1", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &ClusterReconciler{} - got := r.findNodeAddress(tt.nodes) - assert.Equal(t, tt.want, got) - }) - } -} - func TestClusterReconciler_serviceAnnotations(t *testing.T) { tests := []struct { name string diff --git a/internal/controller/util/nodes.go b/internal/controller/util/nodes.go new file mode 100644 index 000000000..fd08b6ffa --- /dev/null +++ b/internal/controller/util/nodes.go @@ -0,0 +1,29 @@ +package util + +import ( + v1 "k8s.io/api/core/v1" + "math/rand" +) + +// FindNodeAddress returns a random node address preferring external address if one is found +func FindNodeAddress(nodes *v1.NodeList) string { + extAddr, intAddr := "", "" + + // Get random node from list + node := nodes.Items[rand.Intn(len(nodes.Items))] + + for _, addr := range node.Status.Addresses { + if addr.Type == v1.NodeExternalIP { + extAddr = addr.Address + break + } + if addr.Type == v1.NodeInternalIP { + intAddr = addr.Address + } + } + + if extAddr != "" { + return extAddr + } + return intAddr +} diff --git a/internal/controller/util/nodes_test.go b/internal/controller/util/nodes_test.go new file mode 100644 index 000000000..3e4120f75 --- /dev/null +++ b/internal/controller/util/nodes_test.go @@ -0,0 +1,112 @@ +package util + +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" +) + +func TestFindNodeAddress(t *testing.T) { + + tests := []struct { + name string + nodes *v1.NodeList + want string + }{ + { + name: "when only internal is set", + nodes: &v1.NodeList{ + Items: []v1.Node{ + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "1.2.3.4", + }, + }, + }, + }, + }, + }, + want: "1.2.3.4", + }, + { + name: "when only external is set", + nodes: &v1.NodeList{ + Items: []v1.Node{ + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "1.2.3.4", + }, + }, + }, + }, + }, + }, 
+ want: "1.2.3.4", + }, + { + name: "when both are set", + nodes: &v1.NodeList{ + Items: []v1.Node{ + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "1.1.1.1", + }, + { + Type: v1.NodeInternalIP, + Address: "2.2.2.2", + }, + }, + }, + }, + }, + }, + want: "1.1.1.1", + }, + { + name: "when multiple addresses are set", + nodes: &v1.NodeList{ + Items: []v1.Node{ + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "2.2.2.2", + }, + { + Type: v1.NodeExternalIP, + Address: "1.1.1.1", + }, + { + Type: v1.NodeInternalIP, + Address: "3.3.3.3", + }, + { + Type: v1.NodeExternalIP, + Address: "4.4.4.4", + }, + }, + }, + }, + }, + }, + want: "1.1.1.1", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := FindNodeAddress(tt.nodes) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/inttest/Makefile b/inttest/Makefile index b06e501c5..0497621ff 100644 --- a/inttest/Makefile +++ b/inttest/Makefile @@ -34,3 +34,4 @@ clean: rm -rf .*.stamp check-monitoring: TIMEOUT=7m +check-capi-controlplane-docker-tunneling: TIMEOUT=10m diff --git a/inttest/Makefile.variables b/inttest/Makefile.variables index 5d48f6f9e..7032546a9 100644 --- a/inttest/Makefile.variables +++ b/inttest/Makefile.variables @@ -14,5 +14,6 @@ smoketests := \ check-capi-controlplane-docker \ check-capi-controlplane-docker-downscaling \ check-capi-controlplane-docker-worker \ + check-capi-controlplane-docker-tunneling \ check-monitoring \ check-capi-docker-machinedeployment \ diff --git a/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go b/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go new file mode 100644 index 000000000..205d35f7c --- /dev/null +++ b/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go @@ -0,0 +1,310 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package capicontolplanedockertunneling + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "strconv" + "strings" + "testing" + "time" + + k0stestutil "github.com/k0sproject/k0s/inttest/common" + "github.com/k0sproject/k0smotron/inttest/util" + + "github.com/stretchr/testify/suite" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +type CAPIControlPlaneDockerSuite struct { + suite.Suite + client *kubernetes.Clientset + restConfig *rest.Config + clusterYamlsPath string + ctx context.Context +} + +func TestCAPIControlPlaneDockerSuite(t *testing.T) { + s := CAPIControlPlaneDockerSuite{} + suite.Run(t, &s) +} + +func (s *CAPIControlPlaneDockerSuite) SetupSuite() { + kubeConfigPath := os.Getenv("KUBECONFIG") + s.Require().NotEmpty(kubeConfigPath, "KUBECONFIG env var must be set and point to kind cluster") + // Get kube client from kubeconfig + restCfg, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath) + s.Require().NoError(err) + s.Require().NotNil(restCfg) + s.restConfig = restCfg + + // Get kube client from kubeconfig + kubeClient, err := kubernetes.NewForConfig(restCfg) + s.Require().NoError(err) + s.Require().NotNil(kubeClient) + s.client = kubeClient + + tmpDir := s.T().TempDir() + s.clusterYamlsPath = tmpDir + "/cluster.yaml" + s.Require().NoError(os.WriteFile(s.clusterYamlsPath, []byte(dockerClusterYaml), 0644)) + + s.ctx, _ = util.NewSuiteContext(s.T()) +} + +func (s *CAPIControlPlaneDockerSuite) TestCAPIControlPlaneDocker() { + + // Apply the child cluster objects + s.applyClusterObjects() + defer func() { + keep := os.Getenv("KEEP_AFTER_TEST") + if keep == "true" { + return + } + if keep == "on-failure" && s.T().Failed() { + return + } + s.T().Log("Deleting cluster objects") + s.deleteCluster() + }() + s.T().Log("cluster objects applied, waiting for cluster to be ready") + + var localPort int + // nolint:staticcheck + err := wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) { + localPort, _ = getLBPort("docker-test-cluster-lb") + return localPort > 0, nil + }) + s.Require().NoError(err) + + s.T().Log("waiting to see admin kubeconfig secret") + kmcKC, err := util.GetKMCClientSet(s.ctx, s.client, "docker-test-cluster", "default", localPort) + s.Require().NoError(err) + + // nolint:staticcheck + err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) { + b, _ := s.client.RESTClient(). + Get(). + AbsPath("/healthz"). 
+ DoRaw(context.Background()) + + return string(b) == "ok", nil + }) + s.Require().NoError(err) + + // nolint:staticcheck + err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) { + output, err := exec.Command("docker", "exec", "docker-test-cluster-docker-test-0", "k0s", "status").Output() + if err != nil { + return false, nil + } + + return strings.Contains(string(output), "Version:"), nil + }) + s.Require().NoError(err) + + s.T().Log("waiting for node to be ready") + s.Require().NoError(k0stestutil.WaitForNodeReadyStatus(s.ctx, kmcKC, "docker-test-cluster-docker-test-worker-0", corev1.ConditionTrue)) + + s.T().Log("waiting for frp server to be ready") + s.Require().NoError(k0stestutil.WaitForDeployment(s.ctx, s.client, "docker-test-frps", "default")) + + s.T().Log("waiting for frp client to be ready") + s.Require().NoError(k0stestutil.WaitForDeployment(s.ctx, kmcKC, "frpc", "kube-system")) + + s.T().Log("checking connectivity to the child cluster via tunnel") + + forwardedPort := 31443 + cl := http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + } + + // nolint:staticcheck + err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) { + resp, err := cl.Get("https://localhost:" + strconv.Itoa(forwardedPort) + "/healthz") + if err != nil { + return false, nil + } + + defer resp.Body.Close() + respBytes, err := io.ReadAll(resp.Body) + if err != nil { + return false, err + } + + return "ok" == string(respBytes), nil + }) + s.Require().NoError(err) + + tunneledKmcKC, err := util.GetKMCClientSet(s.ctx, s.client, "docker-test-cluster", "default", forwardedPort) + s.Require().NoError(err) + + s.T().Log("check for node to be ready via tunnel") + s.Require().NoError(k0stestutil.WaitForNodeReadyStatus(s.ctx, tunneledKmcKC, "docker-test-cluster-docker-test-worker-0", corev1.ConditionTrue)) + + s.Require().NoError(k0stestutil.WaitForDeployment(s.ctx, tunneledKmcKC, "frpc", "kube-system")) +} + +func (s *CAPIControlPlaneDockerSuite) applyClusterObjects() { + // Exec via kubectl + out, err := exec.Command("kubectl", "apply", "-f", s.clusterYamlsPath).CombinedOutput() + s.Require().NoError(err, "failed to apply cluster objects: %s", string(out)) +} + +func (s *CAPIControlPlaneDockerSuite) deleteCluster() { + // Exec via kubectl + out, err := exec.Command("kubectl", "delete", "-f", s.clusterYamlsPath).CombinedOutput() + s.Require().NoError(err, "failed to delete cluster objects: %s", string(out)) +} + +func getLBPort(name string) (int, error) { + b, err := exec.Command("docker", "inspect", name, "--format", "{{json .NetworkSettings.Ports}}").Output() + if err != nil { + return 0, fmt.Errorf("failed to get inspect info from container %s: %w", name, err) + } + + var ports map[string][]map[string]string + err = json.Unmarshal(b, &ports) + if err != nil { + return 0, fmt.Errorf("failed to unmarshal inspect info from container %s: %w", name, err) + } + + return strconv.Atoi(ports["6443/tcp"][0]["HostPort"]) +} + +var dockerClusterYaml = ` +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: docker-test-cluster + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + serviceDomain: cluster.local + services: + cidrBlocks: + - 10.128.0.0/12 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: K0sControlPlane + name: docker-test + infrastructureRef: + apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: docker-test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: docker-test-cp-template + namespace: default +spec: + template: + spec: {} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K0sControlPlane +metadata: + name: docker-test +spec: + replicas: 1 + k0sConfigSpec: + tunneling: + enabled: true + k0s: + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + metadata: + name: k0s + spec: + api: + extraArgs: + anonymous-auth: "true" + telemetry: + enabled: false + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: docker-test-cp-template + namespace: default +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerCluster +metadata: + name: docker-test + namespace: default +spec: +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Machine +metadata: + name: docker-test-worker-0 + namespace: default +spec: + version: v1.27.1 + clusterName: docker-test-cluster + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: K0sWorkerConfig + name: docker-test-worker-0 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachine + name: docker-test-worker-0 +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: K0sWorkerConfig +metadata: + name: docker-test-worker-0 + namespace: default +spec: + # version is deliberately different to be able to verify we actually pick it up :) + version: v1.27.1+k0s.0 + args: + - --labels=k0sproject.io/foo=bar + preStartCommands: + - echo -n "pre-start" > /tmp/pre-start + postStartCommands: + - echo -n "post-start" > /tmp/post-start + files: + - path: /tmp/test-file + content: test-file +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachine +metadata: + name: docker-test-worker-0 + namespace: default +spec: +` From 38977bdb115c548d638821f683db47c4dfc632d7 Mon Sep 17 00:00:00 2001 From: Alexey Makhov Date: Mon, 11 Sep 2023 14:19:11 +0300 Subject: [PATCH 2/4] Client connection tunneling prototype. Token-based auth Signed-off-by: Alexey Makhov --- .../controlplane_bootstrap_controller.go | 22 ++++++++-- .../k0s_controlplane_controller.go | 40 +++++++++++++++++-- 2 files changed, 55 insertions(+), 7 deletions(-) diff --git a/internal/controller/bootstrap/controlplane_bootstrap_controller.go b/internal/controller/bootstrap/controlplane_bootstrap_controller.go index bb0aa8ae3..28dabb013 100644 --- a/internal/controller/bootstrap/controlplane_bootstrap_controller.go +++ b/internal/controller/bootstrap/controlplane_bootstrap_controller.go @@ -173,7 +173,11 @@ func (c *ControlPlaneController) Reconcile(ctx context.Context, req ctrl.Request installCmd = createCPInstallCmdWithJoinToken(config, joinTokenFilePath) } if config.Spec.Tunneling.Enabled { - files = append(files, c.genTunnelingFiles(ctx, scope, config)...) + tunnelingFiles, err := c.genTunnelingFiles(ctx, scope, config) + if err != nil { + return ctrl.Result{}, fmt.Errorf("error generating tunneling files: %v", err) + } + files = append(files, tunnelingFiles...) } files = append(files, config.Spec.Files...) 
@@ -304,7 +308,15 @@ func (c *ControlPlaneController) genControlPlaneJoinFiles(ctx context.Context, s return files, err } -func (c *ControlPlaneController) genTunnelingFiles(_ context.Context, _ *Scope, kcs *bootstrapv1.K0sControllerConfig) []cloudinit.File { +func (c *ControlPlaneController) genTunnelingFiles(ctx context.Context, scope *Scope, kcs *bootstrapv1.K0sControllerConfig) ([]cloudinit.File, error) { + secretName := scope.Cluster.Name + "-frp-token" + frpSecret := corev1.Secret{} + err := c.Client.Get(ctx, client.ObjectKey{Namespace: scope.Cluster.Namespace, Name: secretName}, &frpSecret) + if err != nil { + return nil, fmt.Errorf("failed to get frp secret: %w", err) + } + frpToken := string(frpSecret.Data["value"]) + tunnelingResources := ` --- apiVersion: v1 @@ -315,8 +327,10 @@ metadata: data: frpc.ini: | [common] + authentication_method = token server_addr = %s server_port = 31700 + token = %s [kube-apiserver] type = tcp @@ -359,8 +373,8 @@ spec: return []cloudinit.File{{ Path: "/var/lib/k0s/manifests/k0smotron-tunneling/manifest.yaml", Permissions: "0644", - Content: fmt.Sprintf(tunnelingResources, kcs.Spec.Tunneling.ServerAddress), - }} + Content: fmt.Sprintf(tunnelingResources, kcs.Spec.Tunneling.ServerAddress, frpToken), + }}, nil } func (c *ControlPlaneController) getCerts(ctx context.Context, scope *Scope) ([]cloudinit.File, *secret.Certificate, error) { diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go index 5f2845bf3..f5cb9a5f6 100644 --- a/internal/controller/controlplane/k0s_controlplane_controller.go +++ b/internal/controller/controlplane/k0s_controlplane_controller.go @@ -20,8 +20,8 @@ import ( "context" "errors" "fmt" + "github.com/google/uuid" "github.com/k0sproject/k0smotron/internal/controller/util" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -258,7 +258,7 @@ func (c *K0sController) ensureCertificates(ctx context.Context, cluster *cluster return certificates.LookupOrGenerate(ctx, c.Client, capiutil.ObjectKey(cluster), *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane"))) } -func (c *K0sController) reconcileTunneling(ctx context.Context, _ *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) error { +func (c *K0sController) reconcileTunneling(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) error { if !kcp.Spec.K0sConfigSpec.Tunneling.Enabled { return nil } @@ -271,9 +271,16 @@ func (c *K0sController) reconcileTunneling(ctx context.Context, _ *clusterv1.Clu kcp.Spec.K0sConfigSpec.Tunneling.ServerAddress = ip } + frpToken, err := c.createFRPToken(ctx, cluster, kcp) + if err != nil { + return fmt.Errorf("error creating FRP token secret: %w", err) + } + frpsConfig := ` [common] bind_port = 7000 +authentication_method = token +token = ` + frpToken + ` ` frpsCMName := kcp.GetName() + "-frps-config" cm := corev1.ConfigMap{ @@ -291,7 +298,7 @@ bind_port = 7000 } _ = ctrl.SetControllerReference(kcp, &cm, c.Scheme) - err := c.Client.Patch(ctx, &cm, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) + err = c.Client.Patch(ctx, &cm, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) if err != nil { return fmt.Errorf("error creating ConfigMap: %w", err) } @@ -413,6 +420,33 @@ func (c *K0sController) detectNodeIP(ctx context.Context, _ *cpv1beta1.K0sContro return util.FindNodeAddress(nodes), nil } +func (c *K0sController) 
createFRPToken(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (string, error) { + frpToken := uuid.New().String() + frpSecret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-frp-token", + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + }, + Data: map[string][]byte{ + "value": []byte(frpToken), + }, + Type: clusterv1.ClusterSecretType, + } + + _ = ctrl.SetControllerReference(kcp, frpSecret, c.Scheme) + + return frpToken, c.Client.Patch(ctx, frpSecret, client.Apply, &client.PatchOptions{ + FieldManager: "k0smotron", + }) +} + func machineName(base string, i int) string { return fmt.Sprintf("%s-%d", base, i) } From ca6bb611454eb5ae5927e1dbc2c9284ebf68d74e Mon Sep 17 00:00:00 2001 From: Alexey Makhov Date: Wed, 13 Sep 2023 13:38:23 +0300 Subject: [PATCH 3/4] Client connection tunneling prototype. NodePorts in the config Signed-off-by: Alexey Makhov --- api/bootstrap/v1beta1/k0s_types.go | 10 ++++++++++ ...p.cluster.x-k8s.io_k0scontrollerconfigs.yaml | 12 ++++++++++++ ...plane.cluster.x-k8s.io_k0scontrolplanes.yaml | 12 ++++++++++++ .../controlplane_bootstrap_controller.go | 17 ++++++++++++++--- internal/controller/controlplane/helper.go | 2 -- .../controlplane/k0s_controlplane_controller.go | 16 +++++++++++++--- 6 files changed, 61 insertions(+), 8 deletions(-) diff --git a/api/bootstrap/v1beta1/k0s_types.go b/api/bootstrap/v1beta1/k0s_types.go index 12ad42b80..6854f51ca 100644 --- a/api/bootstrap/v1beta1/k0s_types.go +++ b/api/bootstrap/v1beta1/k0s_types.go @@ -188,4 +188,14 @@ type TunnelingSpec struct { // If empty, k0smotron will try to detect worker node address for. //+kubebuilder:validation:Optional ServerAddress string `json:"serverAddress,omitempty"` + // NodePort to publish for server port of the tunneling server. + // If empty, k0smotron will use the default one. + //+kubebuilder:validation:Optional + //+kubebuilder:default=31700 + ServerNodePort int32 `json:"serverNodePort,omitempty"` + // NodePort to publish for tunneling port. + // If empty, k0smotron will use the default one. + //+kubebuilder:validation:Optional + //+kubebuilder:default=31443 + TunnelingNodePort int32 `json:"tunnelingNodePort,omitempty"` } diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml index dc91248b1..257013bca 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml @@ -92,6 +92,18 @@ spec: description: Server address of the tunneling server. If empty, k0smotron will try to detect worker node address for. type: string + serverNodePort: + default: 31700 + description: NodePort to publish for server port of the tunneling + server. If empty, k0smotron will use the default one. + format: int32 + type: integer + tunnelingNodePort: + default: 31443 + description: NodePort to publish for tunneling port. If empty, + k0smotron will use the default one. + format: int32 + type: integer type: object version: description: 'Version is the version of k0s to use. 
In case this is diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml index 9165f2330..8e5839982 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml @@ -94,6 +94,18 @@ spec: description: Server address of the tunneling server. If empty, k0smotron will try to detect worker node address for. type: string + serverNodePort: + default: 31700 + description: NodePort to publish for server port of the tunneling + server. If empty, k0smotron will use the default one. + format: int32 + type: integer + tunnelingNodePort: + default: 31443 + description: NodePort to publish for tunneling port. If empty, + k0smotron will use the default one. + format: int32 + type: integer type: object version: description: 'Version is the version of k0s to use. In case this diff --git a/internal/controller/bootstrap/controlplane_bootstrap_controller.go b/internal/controller/bootstrap/controlplane_bootstrap_controller.go index 28dabb013..319e4ce07 100644 --- a/internal/controller/bootstrap/controlplane_bootstrap_controller.go +++ b/internal/controller/bootstrap/controlplane_bootstrap_controller.go @@ -137,12 +137,23 @@ func (c *ControlPlaneController) Reconcile(ctx context.Context, req ctrl.Request ) if config.Spec.K0s != nil { - //config.Spec.K0s.SetUnstructuredContent(map["spec"]interface{}{}) err = unstructured.SetNestedField(config.Spec.K0s.Object, scope.Cluster.Spec.ControlPlaneEndpoint.Host, "spec", "api", "externalAddress") if err != nil { return ctrl.Result{}, fmt.Errorf("error setting control plane endpoint: %v", err) } + if config.Spec.Tunneling.ServerAddress != "" { + sans, _, err := unstructured.NestedSlice(config.Spec.K0s.Object, "spec", "api", "sans") + if err != nil { + return ctrl.Result{}, fmt.Errorf("error getting sans from config: %v", err) + } + sans = append(sans, config.Spec.Tunneling.ServerAddress) + err = unstructured.SetNestedSlice(config.Spec.K0s.Object, sans, "spec", "api", "sans") + if err != nil { + return ctrl.Result{}, fmt.Errorf("error setting sans to the config: %v", err) + } + } + k0sConfigBytes, err := config.Spec.K0s.MarshalJSON() if err != nil { return ctrl.Result{}, fmt.Errorf("error marshalling k0s config: %v", err) @@ -329,7 +340,7 @@ data: [common] authentication_method = token server_addr = %s - server_port = 31700 + server_port = %d token = %s [kube-apiserver] @@ -373,7 +384,7 @@ spec: return []cloudinit.File{{ Path: "/var/lib/k0s/manifests/k0smotron-tunneling/manifest.yaml", Permissions: "0644", - Content: fmt.Sprintf(tunnelingResources, kcs.Spec.Tunneling.ServerAddress, frpToken), + Content: fmt.Sprintf(tunnelingResources, kcs.Spec.Tunneling.ServerAddress, kcs.Spec.Tunneling.ServerNodePort, frpToken), }}, nil } diff --git a/internal/controller/controlplane/helper.go b/internal/controller/controlplane/helper.go index 132fef0c0..e16bdabda 100644 --- a/internal/controller/controlplane/helper.go +++ b/internal/controller/controlplane/helper.go @@ -3,7 +3,6 @@ package controlplane import ( "context" "fmt" - "k8s.io/utils/pointer" "strings" "github.com/Masterminds/semver" @@ -83,7 +82,6 @@ func (c *K0sController) createMachineFromTemplate(ctx context.Context, name stri if err = c.Client.Patch(ctx, machineFromTemplate, client.Apply, &client.PatchOptions{ FieldManager: "k0smotron", - Force: pointer.Bool(true), }); err != nil { return nil, err } diff --git 
a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go index f5cb9a5f6..5da4ce97c 100644 --- a/internal/controller/controlplane/k0s_controlplane_controller.go +++ b/internal/controller/controlplane/k0s_controlplane_controller.go @@ -391,13 +391,13 @@ token = ` + frpToken + ` Protocol: corev1.ProtocolTCP, Port: 7000, TargetPort: intstr.FromInt(7000), - NodePort: 31700, + NodePort: kcp.Spec.K0sConfigSpec.Tunneling.ServerNodePort, }, { Name: "tunnel", Protocol: corev1.ProtocolTCP, Port: 6443, TargetPort: intstr.FromInt(6443), - NodePort: 31443, + NodePort: kcp.Spec.K0sConfigSpec.Tunneling.TunnelingNodePort, }}, Type: corev1.ServiceTypeNodePort, }, @@ -421,6 +421,16 @@ func (c *K0sController) detectNodeIP(ctx context.Context, _ *cpv1beta1.K0sContro } func (c *K0sController) createFRPToken(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (string, error) { + secretName := cluster.Name + "-frp-token" + + var existingSecret corev1.Secret + err := c.Client.Get(ctx, client.ObjectKey{Name: secretName, Namespace: cluster.Namespace}, &existingSecret) + if err == nil { + return string(existingSecret.Data["value"]), nil + } else if !apierrors.IsNotFound(err) { + return "", err + } + frpToken := uuid.New().String() frpSecret := &corev1.Secret{ TypeMeta: metav1.TypeMeta{ @@ -428,7 +438,7 @@ func (c *K0sController) createFRPToken(ctx context.Context, cluster *clusterv1.C Kind: "Secret", }, ObjectMeta: metav1.ObjectMeta{ - Name: cluster.Name + "-frp-token", + Name: secretName, Namespace: cluster.Namespace, Labels: map[string]string{ clusterv1.ClusterNameLabel: cluster.Name, From 8142d1803603130b17d40d7ac35bd464aba0239f Mon Sep 17 00:00:00 2001 From: Alexey Makhov Date: Tue, 19 Sep 2023 15:27:56 +0300 Subject: [PATCH 4/4] Client connection tunneling prototype. Proxy mode Signed-off-by: Alexey Makhov --- .github/workflows/go.yml | 1 + api/bootstrap/v1beta1/k0s_types.go | 5 + ...cluster.x-k8s.io_k0scontrollerconfigs.yaml | 8 + ...ane.cluster.x-k8s.io_k0scontrolplanes.yaml | 8 + .../controlplane_bootstrap_controller.go | 17 +- .../k0s_controlplane_controller.go | 60 +++- internal/controller/controlplane/util.go | 67 +++- inttest/Makefile.variables | 1 + ...ontrolplane_docker_tunneling_proxy_test.go | 318 ++++++++++++++++++ ...capi_controlplane_docker_tunneling_test.go | 2 +- 10 files changed, 479 insertions(+), 8 deletions(-) create mode 100644 inttest/capi-controlplane-docker-tunneling-proxy/capi_controlplane_docker_tunneling_proxy_test.go diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 4dc3d5d48..559ba59a9 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -107,6 +107,7 @@ jobs: - check-capi-controlplane-docker - check-capi-controlplane-docker-downscaling - check-capi-controlplane-docker-tunneling + - check-capi-controlplane-docker-tunneling-proxy - check-capi-controlplane-docker-worker steps: - name: Check out code into the Go module directory diff --git a/api/bootstrap/v1beta1/k0s_types.go b/api/bootstrap/v1beta1/k0s_types.go index 6854f51ca..8837d8033 100644 --- a/api/bootstrap/v1beta1/k0s_types.go +++ b/api/bootstrap/v1beta1/k0s_types.go @@ -198,4 +198,9 @@ type TunnelingSpec struct { //+kubebuilder:validation:Optional //+kubebuilder:default=31443 TunnelingNodePort int32 `json:"tunnelingNodePort,omitempty"` + // Mode describes tunneling mode. + // If empty, k0smotron will use the default one. 
+ //+kubebuilder:validation:Enum=tunnel;proxy + //+kubebuilder:default=tunnel + Mode string `json:"mode,omitempty"` } diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml index 257013bca..864cb4ed3 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_k0scontrollerconfigs.yaml @@ -88,6 +88,14 @@ spec: default: false description: Enabled specifies whether tunneling is enabled. type: boolean + mode: + default: tunnel + description: Mode describes tunneling mode. If empty, k0smotron + will use the default one. + enum: + - tunnel + - proxy + type: string serverAddress: description: Server address of the tunneling server. If empty, k0smotron will try to detect worker node address for. diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml index 8e5839982..f87ce3ee4 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_k0scontrolplanes.yaml @@ -90,6 +90,14 @@ spec: default: false description: Enabled specifies whether tunneling is enabled. type: boolean + mode: + default: tunnel + description: Mode describes tunneling mode. If empty, k0smotron + will use the default one. + enum: + - tunnel + - proxy + type: string serverAddress: description: Server address of the tunneling server. If empty, k0smotron will try to detect worker node address for. diff --git a/internal/controller/bootstrap/controlplane_bootstrap_controller.go b/internal/controller/bootstrap/controlplane_bootstrap_controller.go index 319e4ce07..eb549b641 100644 --- a/internal/controller/bootstrap/controlplane_bootstrap_controller.go +++ b/internal/controller/bootstrap/controlplane_bootstrap_controller.go @@ -328,6 +328,19 @@ func (c *ControlPlaneController) genTunnelingFiles(ctx context.Context, scope *S } frpToken := string(frpSecret.Data["value"]) + var modeConfig string + if kcs.Spec.Tunneling.Mode == "proxy" { + modeConfig = fmt.Sprintf(` + type = tcpmux + custom_domains = %s + multiplexer = httpconnect +`, scope.Cluster.Spec.ControlPlaneEndpoint.Host) + } else { + modeConfig = ` + remote_port = 6443 +` + } + tunnelingResources := ` --- apiVersion: v1 @@ -347,7 +360,7 @@ data: type = tcp local_ip = 10.96.0.1 local_port = 443 - remote_port = 6443 + %s --- apiVersion: apps/v1 kind: Deployment @@ -384,7 +397,7 @@ spec: return []cloudinit.File{{ Path: "/var/lib/k0s/manifests/k0smotron-tunneling/manifest.yaml", Permissions: "0644", - Content: fmt.Sprintf(tunnelingResources, kcs.Spec.Tunneling.ServerAddress, kcs.Spec.Tunneling.ServerNodePort, frpToken), + Content: fmt.Sprintf(tunnelingResources, kcs.Spec.Tunneling.ServerAddress, kcs.Spec.Tunneling.ServerNodePort, frpToken, modeConfig), }}, nil } diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go index 5da4ce97c..e1281a8f5 100644 --- a/internal/controller/controlplane/k0s_controlplane_controller.go +++ b/internal/controller/controlplane/k0s_controlplane_controller.go @@ -128,7 +128,7 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct } -func (c *K0sController) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster) error { +func (c *K0sController) reconcileKubeconfig(ctx context.Context, cluster 
*clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) error { if cluster.Spec.ControlPlaneEndpoint.IsZero() { return errors.New("control plane endpoint is not set") } @@ -142,11 +142,53 @@ func (c *K0sController) reconcileKubeconfig(ctx context.Context, cluster *cluste return err } + if kcp.Spec.K0sConfigSpec.Tunneling.Enabled { + if kcp.Spec.K0sConfigSpec.Tunneling.Mode == "proxy" { + secretName := secret.Name(cluster.Name+"-proxied", secret.Kubeconfig) + err := c.Client.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: secretName}, &corev1.Secret{}) + if err != nil { + if apierrors.IsNotFound(err) { + kc, err := c.generateKubeconfig(ctx, cluster, fmt.Sprintf("https://%s", cluster.Spec.ControlPlaneEndpoint.String())) + if err != nil { + return err + } + + for cn := range kc.Clusters { + kc.Clusters[cn].ProxyURL = fmt.Sprintf("http://%s:%d", kcp.Spec.K0sConfigSpec.Tunneling.ServerAddress, kcp.Spec.K0sConfigSpec.Tunneling.TunnelingNodePort) + } + + err = c.createKubeconfigSecret(ctx, kc, cluster, secretName) + if err != nil { + return err + } + } + return err + } + } else { + secretName := secret.Name(cluster.Name+"-tunneled", secret.Kubeconfig) + err := c.Client.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: secretName}, &corev1.Secret{}) + if err != nil { + if apierrors.IsNotFound(err) { + kc, err := c.generateKubeconfig(ctx, cluster, fmt.Sprintf("https://%s:%d", kcp.Spec.K0sConfigSpec.Tunneling.ServerAddress, kcp.Spec.K0sConfigSpec.Tunneling.TunnelingNodePort)) + if err != nil { + return err + } + + err = c.createKubeconfigSecret(ctx, kc, cluster, secretName) + if err != nil { + return err + } + } + return err + } + } + } + return nil } func (c *K0sController) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (ctrl.Result, error) { - err := c.reconcileKubeconfig(ctx, cluster) + err := c.reconcileKubeconfig(ctx, cluster, kcp) if err != nil { return ctrl.Result{}, fmt.Errorf("error reconciling kubeconfig secret: %w", err) } @@ -276,12 +318,24 @@ func (c *K0sController) reconcileTunneling(ctx context.Context, cluster *cluster return fmt.Errorf("error creating FRP token secret: %w", err) } - frpsConfig := ` + var frpsConfig string + if kcp.Spec.K0sConfigSpec.Tunneling.Mode == "proxy" { + frpsConfig = ` +[common] +bind_port = 7000 +tcpmux_httpconnect_port = 6443 +authentication_method = token +token = ` + frpToken + ` +` + } else { + frpsConfig = ` [common] bind_port = 7000 authentication_method = token token = ` + frpToken + ` ` + } + frpsCMName := kcp.GetName() + "-frps-config" cm := corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ diff --git a/internal/controller/controlplane/util.go b/internal/controller/controlplane/util.go index 0e846678b..75ad49ebd 100644 --- a/internal/controller/controlplane/util.go +++ b/internal/controller/controlplane/util.go @@ -2,11 +2,21 @@ package controlplane import ( "context" + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util" - cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" - + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/cluster-api/util/certs" + "sigs.k8s.io/cluster-api/util/kubeconfig" + "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/controller-runtime/pkg/client" + + cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" ) 
 
 func (c *K0sController) getMachineTemplate(ctx context.Context, kcp *cpv1beta1.K0sControlPlane) (*unstructured.Unstructured, error) {
@@ -25,3 +35,56 @@ func (c *K0sController) getMachineTemplate(ctx context.Context, kcp *cpv1beta1.K
 	}
 	return machineTemplate, nil
 }
+
+// generateKubeconfig builds an admin kubeconfig for the child cluster from the cluster CA, pointing at the given API endpoint.
+func (c *K0sController) generateKubeconfig(ctx context.Context, cluster *clusterv1.Cluster, endpoint string) (*api.Config, error) {
+	clusterName := util.ObjectKey(cluster)
+	clusterCA, err := secret.GetFromNamespacedName(ctx, c.Client, clusterName, secret.ClusterCA)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, kubeconfig.ErrDependentCertificateNotFound
+		}
+		return nil, err
+	}
+
+	cert, err := certs.DecodeCertPEM(clusterCA.Data[secret.TLSCrtDataName])
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode CA Cert: %w", err)
+	} else if cert == nil {
+		return nil, fmt.Errorf("certificate not found in config")
+	}
+
+	key, err := certs.DecodePrivateKeyPEM(clusterCA.Data[secret.TLSKeyDataName])
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode private key: %w", err)
+	} else if key == nil {
+		return nil, fmt.Errorf("CA private key not found")
+	}
+
+	cfg, err := kubeconfig.New(clusterName.Name, endpoint, cert, key)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate a kubeconfig: %w", err)
+	}
+
+	return cfg, nil
+}
+
+func (c *K0sController) createKubeconfigSecret(ctx context.Context, cfg *api.Config, cluster *clusterv1.Cluster, secretName string) error {
+	cfgBytes, err := clientcmd.Write(*cfg)
+	if err != nil {
+		return fmt.Errorf("failed to serialize config to yaml: %w", err)
+	}
+
+	clusterName := util.ObjectKey(cluster)
+	owner := metav1.OwnerReference{
+		APIVersion: clusterv1.GroupVersion.String(),
+		Kind:       "Cluster",
+		Name:       cluster.Name,
+		UID:        cluster.UID,
+	}
+
+	kcSecret := kubeconfig.GenerateSecretWithOwner(clusterName, cfgBytes, owner)
+	kcSecret.Name = secretName
+
+	return c.Create(ctx, kcSecret)
+}
diff --git a/inttest/Makefile.variables b/inttest/Makefile.variables
index 7032546a9..c69ce4d59 100644
--- a/inttest/Makefile.variables
+++ b/inttest/Makefile.variables
@@ -15,5 +15,6 @@ smoketests := \
     check-capi-controlplane-docker-downscaling \
    check-capi-controlplane-docker-worker \
    check-capi-controlplane-docker-tunneling \
+   check-capi-controlplane-docker-tunneling-proxy \
    check-monitoring \
    check-capi-docker-machinedeployment \
diff --git a/inttest/capi-controlplane-docker-tunneling-proxy/capi_controlplane_docker_tunneling_proxy_test.go b/inttest/capi-controlplane-docker-tunneling-proxy/capi_controlplane_docker_tunneling_proxy_test.go
new file mode 100644
index 000000000..af6dde2e6
--- /dev/null
+++ b/inttest/capi-controlplane-docker-tunneling-proxy/capi_controlplane_docker_tunneling_proxy_test.go
@@ -0,0 +1,318 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package capicontrolplanedockertunnelingproxy
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	k0stestutil "github.com/k0sproject/k0s/inttest/common"
+	"github.com/k0sproject/k0smotron/inttest/util"
+
+	"github.com/stretchr/testify/suite"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+type CAPIControlPlaneDockerSuite struct {
+	suite.Suite
+	client           *kubernetes.Clientset
+	restConfig       *rest.Config
+	clusterYamlsPath string
+	ctx              context.Context
+}
+
+func TestCAPIControlPlaneDockerSuite(t *testing.T) {
+	s := CAPIControlPlaneDockerSuite{}
+	suite.Run(t, &s)
+}
+
+func (s *CAPIControlPlaneDockerSuite) SetupSuite() {
+	kubeConfigPath := os.Getenv("KUBECONFIG")
+	s.Require().NotEmpty(kubeConfigPath, "KUBECONFIG env var must be set and point to kind cluster")
+	// Build the management cluster REST config from the kubeconfig
+	restCfg, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
+	s.Require().NoError(err)
+	s.Require().NotNil(restCfg)
+	s.restConfig = restCfg
+
+	// Get kube client from kubeconfig
+	kubeClient, err := kubernetes.NewForConfig(restCfg)
+	s.Require().NoError(err)
+	s.Require().NotNil(kubeClient)
+	s.client = kubeClient
+
+	tmpDir := s.T().TempDir()
+	s.clusterYamlsPath = tmpDir + "/cluster.yaml"
+	s.Require().NoError(os.WriteFile(s.clusterYamlsPath, []byte(dockerClusterYaml), 0644))
+
+	s.ctx, _ = util.NewSuiteContext(s.T())
+}
+
+func (s *CAPIControlPlaneDockerSuite) TestCAPIControlPlaneDocker() {
+
+	// Apply the child cluster objects
+	s.applyClusterObjects()
+	defer func() {
+		keep := os.Getenv("KEEP_AFTER_TEST")
+		if keep == "true" {
+			return
+		}
+		if keep == "on-failure" && s.T().Failed() {
+			return
+		}
+		s.T().Log("Deleting cluster objects")
+		s.deleteCluster()
+	}()
+	s.T().Log("cluster objects applied, waiting for cluster to be ready")
+
+	var localPort int
+	// nolint:staticcheck
+	err := wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
+		localPort, _ = getLBPort("docker-test-cluster-lb")
+		return localPort > 0, nil
+	})
+	s.Require().NoError(err)
+
+	s.T().Log("waiting to see admin kubeconfig secret")
+	kmcKC, err := util.GetKMCClientSet(s.ctx, s.client, "docker-test-cluster", "default", localPort)
+	s.Require().NoError(err)
+
+	// nolint:staticcheck
+	err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
+		b, _ := kmcKC.RESTClient().
+			Get().
+			AbsPath("/healthz").
+			DoRaw(context.Background())
+
+		return string(b) == "ok", nil
+	})
+	s.Require().NoError(err)
+
+	// nolint:staticcheck
+	err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
+		output, err := exec.Command("docker", "exec", "docker-test-cluster-docker-test-0", "k0s", "status").Output()
+		if err != nil {
+			return false, nil
+		}
+
+		return strings.Contains(string(output), "Version:"), nil
+	})
+	s.Require().NoError(err)
+
+	s.T().Log("waiting for node to be ready")
+	s.Require().NoError(k0stestutil.WaitForNodeReadyStatus(s.ctx, kmcKC, "docker-test-cluster-docker-test-worker-0", corev1.ConditionTrue))
+
+	s.T().Log("waiting for frp server to be ready")
+	s.Require().NoError(k0stestutil.WaitForDeployment(s.ctx, s.client, "docker-test-frps", "default"))
+
+	s.T().Log("waiting for frp client to be ready")
+	s.Require().NoError(k0stestutil.WaitForDeployment(s.ctx, kmcKC, "frpc", "kube-system"))
+
+	s.T().Log("checking connectivity to the child cluster via tunnel")
+
+	forwardedPort := 31443
+
+	tunneledKmcKC, err := GetKMCClientSetWithProxy(s.ctx, s.client, "docker-test-cluster-proxied", "default", forwardedPort)
+	s.Require().NoError(err)
+
+	s.T().Log("check for node to be ready via tunnel")
+	_, err = tunneledKmcKC.RESTClient().
+		Get().
+		AbsPath("/healthz").
+		DoRaw(context.Background())
+	s.Require().NoError(err)
+
+	s.Require().NoError(k0stestutil.WaitForNodeReadyStatus(s.ctx, tunneledKmcKC, "docker-test-cluster-docker-test-worker-0", corev1.ConditionTrue))
+
+	s.Require().NoError(k0stestutil.WaitForDeployment(s.ctx, tunneledKmcKC, "frpc", "kube-system"))
+}
+
+func GetKMCClientSetWithProxy(ctx context.Context, kc *kubernetes.Clientset, name string, namespace string, port int) (*kubernetes.Clientset, error) {
+	secretName := fmt.Sprintf("%s-kubeconfig", name)
+	// Wait first to see the secret exists
+	if err := util.WaitForSecret(ctx, kc, secretName, namespace); err != nil {
+		return nil, err
+	}
+	kubeConf, err := kc.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	kmcCfg, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeConf.Data["value"]))
+	if err != nil {
+		return nil, err
+	}
+
+	// Route all requests through the HTTP CONNECT proxy exposed on the forwarded port
+	proxyURL, _ := url.Parse(fmt.Sprintf("http://localhost:%d", port))
+	kmcCfg.Proxy = http.ProxyURL(proxyURL)
+
+	return kubernetes.NewForConfig(kmcCfg)
+}
+
+func (s *CAPIControlPlaneDockerSuite) applyClusterObjects() {
+	// Exec via kubectl
+	out, err := exec.Command("kubectl", "apply", "-f", s.clusterYamlsPath).CombinedOutput()
+	s.Require().NoError(err, "failed to apply cluster objects: %s", string(out))
+}
+
+func (s *CAPIControlPlaneDockerSuite) deleteCluster() {
+	// Exec via kubectl
+	out, err := exec.Command("kubectl", "delete", "-f", s.clusterYamlsPath).CombinedOutput()
+	s.Require().NoError(err, "failed to delete cluster objects: %s", string(out))
+}
+
+func getLBPort(name string) (int, error) {
+	b, err := exec.Command("docker", "inspect", name, "--format", "{{json .NetworkSettings.Ports}}").Output()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get inspect info from container %s: %w", name, err)
+	}
+
+	var ports map[string][]map[string]string
+	err = json.Unmarshal(b, &ports)
+	if err != nil {
+		return 0, fmt.Errorf("failed to unmarshal inspect info from container %s: %w", name, err)
+	}
+
+	return strconv.Atoi(ports["6443/tcp"][0]["HostPort"])
+}
+
+var dockerClusterYaml = `
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: docker-test-cluster
+  namespace: default
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+      - 192.168.0.0/16
+    serviceDomain: cluster.local
+    services:
+      cidrBlocks:
+      - 10.128.0.0/12
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: K0sControlPlane
+    name: docker-test
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: DockerCluster
+    name: docker-test
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: docker-test-cp-template
+  namespace: default
+spec:
+  template:
+    spec: {}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: K0sControlPlane
+metadata:
+  name: docker-test
+spec:
+  replicas: 1
+  k0sConfigSpec:
+    tunneling:
+      enabled: true
+      mode: proxy
+    k0s:
+      apiVersion: k0s.k0sproject.io/v1beta1
+      kind: ClusterConfig
+      metadata:
+        name: k0s
+      spec:
+        api:
+          extraArgs:
+            anonymous-auth: "true"
+        telemetry:
+          enabled: false
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: DockerMachineTemplate
+      name: docker-test-cp-template
+      namespace: default
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerCluster
+metadata:
+  name: docker-test
+  namespace: default
+spec:
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Machine
+metadata:
+  name: docker-test-worker-0
+  namespace: default
+spec:
+  version: v1.27.1
+  clusterName: docker-test-cluster
+  bootstrap:
+    configRef:
+      apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+      kind: K0sWorkerConfig
+      name: docker-test-worker-0
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: DockerMachine
+    name: docker-test-worker-0
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: K0sWorkerConfig
+metadata:
+  name: docker-test-worker-0
+  namespace: default
+spec:
+  # version is deliberately different to be able to verify we actually pick it up :)
+  version: v1.27.1+k0s.0
+  args:
+    - --labels=k0sproject.io/foo=bar
+  preStartCommands:
+    - echo -n "pre-start" > /tmp/pre-start
+  postStartCommands:
+    - echo -n "post-start" > /tmp/post-start
+  files:
+    - path: /tmp/test-file
+      content: test-file
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachine
+metadata:
+  name: docker-test-worker-0
+  namespace: default
+spec:
+`
diff --git a/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go b/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go
index 205d35f7c..709374a64 100644
--- a/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go
+++ b/inttest/capi-controlplane-docker-tunneling/capi_controlplane_docker_tunneling_test.go
@@ -162,7 +162,7 @@ func (s *CAPIControlPlaneDockerSuite) TestCAPIControlPlaneDocker() {
 	})
 	s.Require().NoError(err)
 
-	tunneledKmcKC, err := util.GetKMCClientSet(s.ctx, s.client, "docker-test-cluster", "default", forwardedPort)
+	tunneledKmcKC, err := util.GetKMCClientSet(s.ctx, s.client, "docker-test-cluster-tunneled", "default", forwardedPort)
 	s.Require().NoError(err)
 
 	s.T().Log("check for node to be ready via tunnel")
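
Reviewer note: the short Go sketch below is illustrative only and is not part of the change itself. It shows how a client outside the child cluster could consume the "<cluster>-proxied" kubeconfig secret that reconcileKubeconfig creates in proxy mode: because the generated kubeconfig carries a proxy-url for each cluster entry, a reasonably recent client-go routes requests through the FRP HTTP CONNECT proxy without the manual proxy override the integration test needs for its port-forwarded setup. The secret name and data key mirror createKubeconfigSecret and the test above; everything else (function names, cluster name, namespace) is assumed for the example.

package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// loadProxiedClient builds a clientset for the child cluster from the
// "<cluster>-proxied-kubeconfig" secret stored in the management cluster.
// The embedded proxy-url makes client-go tunnel requests through the
// HTTP CONNECT proxy exposed by the tunneling server (assumption: the
// client-go version in use honors the kubeconfig proxy-url field).
func loadProxiedClient(ctx context.Context, mgmt kubernetes.Interface, clusterName, namespace string) (*kubernetes.Clientset, error) {
	secretName := fmt.Sprintf("%s-proxied-kubeconfig", clusterName)
	kubeConf, err := mgmt.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to read kubeconfig secret %s: %w", secretName, err)
	}

	cfg, err := clientcmd.RESTConfigFromKubeConfig(kubeConf.Data["value"])
	if err != nil {
		return nil, fmt.Errorf("failed to parse kubeconfig: %w", err)
	}

	return kubernetes.NewForConfig(cfg)
}

func main() {
	// Assumes KUBECONFIG points at the management (mother) cluster, as in the tests above.
	mgmtCfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	mgmt, err := kubernetes.NewForConfig(mgmtCfg)
	if err != nil {
		panic(err)
	}

	child, err := loadProxiedClient(context.Background(), mgmt, "docker-test-cluster", "default")
	if err != nil {
		panic(err)
	}

	version, err := child.Discovery().ServerVersion()
	if err != nil {
		panic(err)
	}
	fmt.Println("child cluster reachable through the tunnel, version:", version.GitVersion)
}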