diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
new file mode 100644
index 000000000..ab8dfc6f0
--- /dev/null
+++ b/.github/workflows/e2e.yaml
@@ -0,0 +1,220 @@
+name: e2e
+
+on:
+  push:
+    branches:
+      - 'master'
+    tags:
+      - 'v*'
+  pull_request:
+    branches: [ master ]
+  workflow_dispatch:
+
+env:
+  GO_VERSION: "1.21.4"
+  K8S_VERSION: "v1.27.3"
+  KIND_CLUSTER_NAME: "kind"
+
+jobs:
+  build:
+    name: build
+    runs-on: ubuntu-20.04
+    steps:
+      - name: Set up Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ env.GO_VERSION }}
+        id: go
+
+      - name: Check out code
+        uses: actions/checkout@v2
+
+      - name: Build
+        run: |
+          mkdir _output
+
+          docker build -t gcr.io/k8s-staging-kas-network-proxy/proxy-agent:master -f artifacts/images/agent-build.Dockerfile .
+          docker save gcr.io/k8s-staging-kas-network-proxy/proxy-agent:master > _output/konnectivity-agent.tar
+          docker build -t gcr.io/k8s-staging-kas-network-proxy/proxy-server:master -f artifacts/images/server-build.Dockerfile .
+          docker save gcr.io/k8s-staging-kas-network-proxy/proxy-server:master > _output/konnectivity-server.tar
+
+      - uses: actions/upload-artifact@v2
+        with:
+          name: konnectivity-server
+          path: _output/konnectivity-server.tar
+
+      - uses: actions/upload-artifact@v2
+        with:
+          name: konnectivity-agent
+          path: _output/konnectivity-agent.tar
+
+  e2e:
+    name: e2e
+    runs-on: ubuntu-20.04
+    timeout-minutes: 100
+    needs:
+      - build
+    strategy:
+      fail-fast: false
+      matrix:
+        ipFamily: ["ipv4", "ipv6", "dual"]
+    env:
+      JOB_NAME: "kindnetd-e2e-${{ matrix.ipFamily }}"
+      IP_FAMILY: ${{ matrix.ipFamily }}
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v2
+
+      - name: Enable ipv4 and ipv6 forwarding
+        run: |
+          sudo sysctl -w net.ipv6.conf.all.forwarding=1
+          sudo sysctl -w net.ipv4.ip_forward=1
+
+      - name: Set up environment (download dependencies)
+        run: |
+          TMP_DIR=$(mktemp -d)
+          # Test binaries
+          curl -L https://dl.k8s.io/${{ env.K8S_VERSION }}/kubernetes-test-linux-amd64.tar.gz -o ${TMP_DIR}/kubernetes-test-linux-amd64.tar.gz
+          tar xvzf ${TMP_DIR}/kubernetes-test-linux-amd64.tar.gz \
+            --directory ${TMP_DIR} \
+            --strip-components=3 kubernetes/test/bin/ginkgo kubernetes/test/bin/e2e.test
+          # kubectl
+          curl -L https://dl.k8s.io/${{ env.K8S_VERSION }}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
+          # kind
+          curl -Lo ${TMP_DIR}/kind https://kind.sigs.k8s.io/dl/v0.17.0/kind-linux-amd64
+          # Install
+          sudo cp ${TMP_DIR}/ginkgo /usr/local/bin/ginkgo
+          sudo cp ${TMP_DIR}/e2e.test /usr/local/bin/e2e.test
+          sudo cp ${TMP_DIR}/kubectl /usr/local/bin/kubectl
+          sudo cp ${TMP_DIR}/kind /usr/local/bin/kind
+          sudo chmod +x /usr/local/bin/*
+
+      - name: Create multi node cluster
+        run: |
+          # output_dir
+          mkdir -p _artifacts
+          # create cluster
+          cat < _artifacts/kubeconfig.conf
+
+      - uses: actions/download-artifact@v2
+        with:
+          name: konnectivity-server
+
+      - uses: actions/download-artifact@v2
+        with:
+          name: konnectivity-agent
+
+      - name: Install konnectivity
+        run: |
+          # preload konnectivity images
+          docker load --input konnectivity-server.tar
+          docker load --input konnectivity-agent.tar
+          /usr/local/bin/kind load docker-image gcr.io/k8s-staging-kas-network-proxy/proxy-server:master --name ${{ env.KIND_CLUSTER_NAME}}
+          /usr/local/bin/kind load docker-image gcr.io/k8s-staging-kas-network-proxy/proxy-agent:master --name ${{ env.KIND_CLUSTER_NAME}}
+          kubectl apply -f examples/kind/konnectivity-server.yaml
+          kubectl apply -f examples/kind/konnectivity-agent-ds.yaml
+
+      - name: Get Cluster status
+        run: |
+          # wait until the network is ready
+          sleep 5
+          /usr/local/bin/kubectl get nodes -o wide
+          /usr/local/bin/kubectl get pods -A
+          /usr/local/bin/kubectl wait --timeout=1m --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns
+          # smoke test
+          /usr/local/bin/kubectl run test --image httpd:2
+          /usr/local/bin/kubectl wait --timeout=1m --for=condition=ready pods test
+          /usr/local/bin/kubectl logs test
+
+      - name: Workaround CoreDNS for IPv6 airgapped
+        if: ${{ matrix.ipFamily == 'ipv6' }}
+        run: |
+          # Patch CoreDNS to work in GitHub CI
+          # 1. GitHub CI doesn't offer IPv6 connectivity, so CoreDNS should be configured
+          #    to work in an offline environment:
+          #    https://github.com/coredns/coredns/issues/2494#issuecomment-457215452
+          # 2. GitHub CI adds the following domains to the resolv.conf search field:
+          #    .net.
+          #    CoreDNS should handle those domains and answer with NXDOMAIN instead of SERVFAIL,
+          #    otherwise pods stop trying to resolve the domain.
+          # Get the current config
+          original_coredns=$(/usr/local/bin/kubectl get -oyaml -n=kube-system configmap/coredns)
+          echo "Original CoreDNS config:"
+          echo "${original_coredns}"
+          # Patch it
+          fixed_coredns=$(
+            printf '%s' "${original_coredns}" | sed \
+              -e 's/^.*kubernetes cluster\.local/& net/' \
+              -e '/^.*upstream$/d' \
+              -e '/^.*fallthrough.*$/d' \
+              -e '/^.*forward . \/etc\/resolv.conf$/d' \
+              -e '/^.*loop$/d' \
+          )
+          echo "Patched CoreDNS config:"
+          echo "${fixed_coredns}"
+          printf '%s' "${fixed_coredns}" | /usr/local/bin/kubectl apply -f -
+
+      - name: Run tests
+        run: |
+          export KUBERNETES_CONFORMANCE_TEST='y'
+          export E2E_REPORT_DIR=${PWD}/_artifacts
+
+          # Run tests
+          /usr/local/bin/ginkgo --nodes=25 \
+            --focus="\[Conformance\]" \
+            --skip="Feature|Federation|machinery|PerformanceDNS|DualStack|Disruptive|Serial|Slow|KubeProxy|LoadBalancer|GCE|Netpol|NetworkPolicy|NodeConformance" \
+            /usr/local/bin/e2e.test \
+            -- \
+            --kubeconfig=${PWD}/_artifacts/kubeconfig.conf \
+            --provider=local \
+            --dump-logs-on-failure=false \
+            --report-dir=${E2E_REPORT_DIR} \
+            --disable-log-dump=true
+
+      - name: Upload Junit Reports
+        if: always()
+        uses: actions/upload-artifact@v2
+        with:
+          name: kind-junit-${{ env.JOB_NAME }}-${{ github.run_id }}
+          path: './_artifacts/*.xml'
+
+      - name: Export logs
+        if: always()
+        run: |
+          /usr/local/bin/kind export logs --name ${KIND_CLUSTER_NAME} --loglevel=debug ./_artifacts/logs
+
+      - name: Upload logs
+        if: always()
+        uses: actions/upload-artifact@v2
+        with:
+          name: kind-logs-${{ env.JOB_NAME }}-${{ github.run_id }}
+          path: ./_artifacts/logs
diff --git a/examples/kind/README.md b/examples/kind/README.md
new file mode 100644
index 000000000..b5cac8ef1
--- /dev/null
+++ b/examples/kind/README.md
@@ -0,0 +1,51 @@
+# Use apiserver-network-proxy with KIND
+
+
+Change to the `examples/kind` folder and create a `kind` cluster with the `kind.config` file:
+
+```sh
+$ kind create cluster --config kind.config
+Creating cluster "kind" ...
+DEBUG: docker/images.go:58] Image: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72 present locally
+ ✓ Ensuring node image (kindest/node:v1.27.3) 🖼
+⠎⠁ Preparing nodes 📦 📦 📦
+
+This node has joined the cluster:
+* Certificate signing request was sent to apiserver and a response was received.
+* The Kubelet was informed of the new secure connection details.
+
+Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
+ ✓ Joining worker nodes 🚜
+Set kubectl context to "kind-kind"
+You can now use your cluster with:
+
+kubectl cluster-info --context kind-kind
+
+Have a nice day! 👋
+```
+
+Once the cluster is ready, install the `apiserver-network-proxy` components:
+
+```sh
+$ kubectl apply -f konnectivity-server.yaml
+clusterrolebinding.rbac.authorization.k8s.io/system:konnectivity-server created
+daemonset.apps/konnectivity-server created
+
+$ kubectl apply -f konnectivity-agent-ds.yaml
+serviceaccount/konnectivity-agent created
+```
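+
+Both manifests create resources in the `kube-system` namespace: a `konnectivity-server` DaemonSet pinned to the control-plane node and a `konnectivity-agent` DaemonSet that runs on every node. Before moving on it can help to confirm that they are up; the label selectors below come from the manifests in this folder, and the pod counts depend on how many nodes the cluster has:
+
+```sh
+# konnectivity-server runs on the control-plane node, konnectivity-agent on every node
+$ kubectl -n kube-system get pods -l k8s-app=konnectivity-server
+$ kubectl -n kube-system get pods -l k8s-app=konnectivity-agent -o wide
+$ kubectl -n kube-system wait --timeout=2m --for=condition=ready pods -l k8s-app=konnectivity-agent
+```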
+
+To validate that it works, run a custom image and try to exec into the pod (the exec request goes through the konnectivity proxy):
+```sh
+$ kubectl run test --image httpd:2
+pod/test created
+$ kubectl get pods
+NAME   READY   STATUS              RESTARTS   AGE
+test   0/1     ContainerCreating   0          4s
+$ kubectl get pods
+NAME   READY   STATUS    RESTARTS   AGE
+test   1/1     Running   0          6s
+$ kubectl exec -it test bash
+kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
+```
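+
+To confirm that the API server is actually using the proxy for this traffic, check the konnectivity-server logs for the agent connections, and verify that the UDS socket referenced by `egress_selector_configuration.yaml` exists on the control-plane node. The container name below assumes the default kind cluster name (`kind`); the socket path matches `kind.config` and `konnectivity-server.yaml` in this folder:
+
+```sh
+$ kubectl -n kube-system logs -l k8s-app=konnectivity-server --tail=20
+# assumes the default kind cluster name "kind" (control-plane container "kind-control-plane")
+$ docker exec kind-control-plane ls -l /etc/kubernetes/konnectivity-server/
+```
+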
diff --git a/examples/kind/egress_selector_configuration.yaml b/examples/kind/egress_selector_configuration.yaml
new file mode 100644
index 000000000..e5f0f0e7d
--- /dev/null
+++ b/examples/kind/egress_selector_configuration.yaml
@@ -0,0 +1,15 @@
+apiVersion: apiserver.k8s.io/v1beta1
+kind: EgressSelectorConfiguration
+egressSelections:
+- name: cluster
+  connection:
+    proxyProtocol: GRPC
+    transport:
+      uds:
+        udsName: /etc/kubernetes/konnectivity-server/konnectivity-server.socket
+- name: master
+  connection:
+    proxyProtocol: Direct
+- name: etcd
+  connection:
+    proxyProtocol: Direct
diff --git a/examples/kind/kind.config b/examples/kind/kind.config
new file mode 100644
index 000000000..e09b407e5
--- /dev/null
+++ b/examples/kind/kind.config
@@ -0,0 +1,32 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+networking:
+  ipFamily: ipv4
+nodes:
+- role: control-plane
+  kubeadmConfigPatchesJSON6902:
+  - kind: ClusterConfiguration
+    patch: |
+      - op: add
+        path: /apiServer/certSANs/-
+        value: konnectivity-server.kube-system.svc.cluster.local
+  kubeadmConfigPatches:
+  - |
+    kind: ClusterConfiguration
+    apiServer:
+      extraArgs:
+        "egress-selector-config-file": "/etc/kubernetes/konnectivity-server-config/egress_selector_configuration.yaml"
+      extraVolumes:
+      - name: egress-selector-config-file
+        hostPath: "/etc/kubernetes/konnectivity-server-config/egress_selector_configuration.yaml"
+        mountPath: "/etc/kubernetes/konnectivity-server-config/egress_selector_configuration.yaml"
+        readOnly: true
+      - name: konnectivity-server
+        hostPath: "/etc/kubernetes/konnectivity-server"
+        mountPath: "/etc/kubernetes/konnectivity-server"
+        readOnly: true
+  extraMounts:
+  - hostPath: ./egress_selector_configuration.yaml
+    containerPath: /etc/kubernetes/konnectivity-server-config/egress_selector_configuration.yaml
+- role: worker
+- role: worker
diff --git a/examples/kind/konnectivity-agent-ds.yaml b/examples/kind/konnectivity-agent-ds.yaml
new file mode 100644
index 000000000..9ed5c6aac
--- /dev/null
+++ b/examples/kind/konnectivity-agent-ds.yaml
@@ -0,0 +1,87 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: konnectivity-agent
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: konnectivity-agent
+  namespace: kube-system
+  name: konnectivity-agent
+spec:
+  selector:
+    matchLabels:
+      k8s-app: konnectivity-agent
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: konnectivity-agent
+    spec:
+      priorityClassName: system-cluster-critical
+      tolerations:
+        - key: "CriticalAddonsOnly"
+          operator: "Exists"
+        - operator: "Exists"
+          effect: "NoExecute"
+      nodeSelector:
+        kubernetes.io/os: linux
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+        - name: konnectivity-agent-container
+          image: gcr.io/k8s-staging-kas-network-proxy/proxy-agent:master
+          resources:
+            requests:
+              cpu: 50m
+            limits:
+              memory: 30Mi
+          command: [ "/proxy-agent" ]
+          args: [
+            "--logtostderr=true",
+            "--ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
+            "--proxy-server-host=konnectivity-server.kube-system.svc.cluster.local",
+            "--proxy-server-port=8091",
+            "--sync-interval=5s",
+            "--sync-interval-cap=30s",
+            "--probe-interval=5s",
+            "--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token",
+            "--agent-identifiers=ipv4=$(HOST_IP)"
+            ]
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          livenessProbe:
+            httpGet:
+              scheme: HTTP
+              port: 8093
+              path: /healthz
+            initialDelaySeconds: 15
+            timeoutSeconds: 15
+          volumeMounts:
+            - mountPath: /var/run/secrets/tokens
+              name: konnectivity-agent-token
+      serviceAccountName: konnectivity-agent
+      volumes:
+        - name: konnectivity-agent-token
+          projected:
+            sources:
+              - serviceAccountToken:
+                  path: konnectivity-agent-token
+                  audience: system:konnectivity-server
diff --git a/examples/kind/konnectivity-server.yaml b/examples/kind/konnectivity-server.yaml
new file mode 100644
index 000000000..ffb2d198e
--- /dev/null
+++ b/examples/kind/konnectivity-server.yaml
@@ -0,0 +1,122 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:konnectivity-server
+  labels:
+    kubernetes.io/cluster-service: "true"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+  - apiGroup: rbac.authorization.k8s.io
+    kind: User
+    name: system:konnectivity-server
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: konnectivity-server
+  namespace: kube-system
+spec:
+  selector:
+    k8s-app: konnectivity-server
+  clusterIP: None
+  ports:
+    - protocol: TCP
+      port: 8091
+      targetPort: 8091
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: konnectivity-server
+  namespace: kube-system
+  name: konnectivity-server
+spec:
+  selector:
+    matchLabels:
+      k8s-app: konnectivity-server
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: konnectivity-server
+    spec:
+      priorityClassName: system-cluster-critical
+      tolerations:
+        - key: "CriticalAddonsOnly"
+          operator: "Exists"
+        - operator: "Exists"
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: ""
+      hostNetwork: true
+      containers:
+        - name: konnectivity-server-container
+          image: gcr.io/k8s-staging-kas-network-proxy/proxy-server:master
+          resources:
+            requests:
+              cpu: 1m
+          command: [ "/proxy-server" ]
+          args: [
+            "--log-file=/var/log/konnectivity-server.log",
+            "--logtostderr=true",
+            "--log-file-max-size=0",
+            "--uds-name=/etc/kubernetes/konnectivity-server/konnectivity-server.socket",
+            "--cluster-cert=/etc/kubernetes/pki/apiserver.crt",
+            "--cluster-key=/etc/kubernetes/pki/apiserver.key",
+            "--server-port=0",
+            "--agent-port=8091",
+            "--health-port=8092",
+            "--admin-port=8093",
+            "--keepalive-time=1h",
+            "--mode=grpc",
+            "--agent-namespace=kube-system",
"--agent-service-account=konnectivity-agent", + "--kubeconfig=/etc/kubernetes/admin.conf", + "--authentication-audience=system:konnectivity-server", + ] + livenessProbe: + httpGet: + scheme: HTTP + host: 127.0.0.1 + port: 8092 + path: /healthz + initialDelaySeconds: 10 + timeoutSeconds: 60 + ports: + - name: serverport + containerPort: 8090 + hostPort: 8090 + - name: agentport + containerPort: 8091 + hostPort: 8091 + - name: healthport + containerPort: 8092 + hostPort: 8092 + - name: adminport + containerPort: 8093 + hostPort: 8093 + volumeMounts: + - name: varlogkonnectivityserver + mountPath: /var/log/konnectivity-server.log + readOnly: false + - name: kubernetes + mountPath: /etc/kubernetes + readOnly: true + - name: konnectivity-home + mountPath: /etc/kubernetes/konnectivity-server + volumes: + - name: varlogkonnectivityserver + hostPath: + path: /var/log/konnectivity-server.log + type: FileOrCreate + - name: kubernetes + hostPath: + path: /etc/kubernetes + - name: konnectivity-home + hostPath: + path: /etc/kubernetes/konnectivity-server + type: DirectoryOrCreate