diff --git a/README.md b/README.md index 1e447975..30b5656d 100644 --- a/README.md +++ b/README.md @@ -113,7 +113,6 @@ K0sctl is ready for use and in continuous development. It is still at a stage wh Missing major features include at least: * The released binaries have not been signed -* Nodes can't be removed * The configuration specification and command-line interface options are still evolving ## Usage @@ -481,6 +480,10 @@ Localhost connection options. Can be used to use the local host running k0sctl a This must be set `true` to enable the localhost connection. +###### `spec.hosts[*].reset` <boolean> (optional) (default: `false`) + +If set to `true` k0sctl will remove the node from kubernetes and reset k0s on the host. + ### K0s Fields ##### `spec.k0s.version` <string> (optional) (default: auto-discovery) diff --git a/cmd/apply.go b/cmd/apply.go index f6bd5f5d..7a7825e2 100644 --- a/cmd/apply.go +++ b/cmd/apply.go @@ -86,6 +86,12 @@ var applyCommand = &cli.Command{ &phase.UpgradeWorkers{ NoDrain: ctx.Bool("no-drain"), }, + &phase.ResetWorkers{ + NoDrain: ctx.Bool("no-drain"), + }, + &phase.ResetControllers{ + NoDrain: ctx.Bool("no-drain"), + }, &phase.RunHooks{Stage: "after", Action: "apply"}, ) @@ -128,6 +134,16 @@ var applyCommand = &cli.Command{ text := fmt.Sprintf("==> Finished in %s", duration) log.Infof(Colorize.Green(text).String()) + uninstalled := false + for _, host := range manager.Config.Spec.Hosts { + if host.Reset { + uninstalled = true + } + } + if uninstalled { + log.Info("There were nodes that got uninstalled during the apply phase. 
Please remove them from your k0sctl config file") + } + log.Infof("k0s cluster version %s is now installed", manager.Config.Spec.K0s.Version) log.Infof("Tip: To access the cluster you can now fetch the admin kubeconfig using:") log.Infof(" " + Colorize.Cyan("k0sctl kubeconfig").String()) diff --git a/cmd/reset.go b/cmd/reset.go index 565f809e..dd772711 100644 --- a/cmd/reset.go +++ b/cmd/reset.go @@ -51,6 +51,9 @@ var resetCommand = &cli.Command{ start := time.Now() manager := phase.Manager{Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster)} + for _, h := range manager.Config.Spec.Hosts { + h.Reset = true + } lockPhase := &phase.Lock{} manager.AddPhase( @@ -60,7 +63,16 @@ var resetCommand = &cli.Command{ &phase.PrepareHosts{}, &phase.GatherK0sFacts{}, &phase.RunHooks{Stage: "before", Action: "reset"}, - &phase.Reset{}, + &phase.ResetWorkers{ + NoDrain: true, + NoDelete: true, + }, + &phase.ResetControllers{ + NoDrain: true, + NoDelete: true, + NoLeave: true, + }, + &phase.ResetLeader{}, &phase.RunHooks{Stage: "after", Action: "reset"}, &phase.Unlock{Cancel: lockPhase.Cancel}, &phase.Disconnect{}, diff --git a/configurer/linux/enterpriselinux/rhel.go b/configurer/linux/enterpriselinux/rhel.go index 9aaa1412..0ecca7ef 100644 --- a/configurer/linux/enterpriselinux/rhel.go +++ b/configurer/linux/enterpriselinux/rhel.go @@ -1,11 +1,12 @@ package enterpriselinux import ( + "strings" + "github.com/k0sproject/k0sctl/configurer" k0slinux "github.com/k0sproject/k0sctl/configurer/linux" "github.com/k0sproject/rig" "github.com/k0sproject/rig/os/registry" - "strings" ) // RHEL provides OS support for RedHat Enterprise Linux diff --git a/phase/download_binaries.go b/phase/download_binaries.go index 9d13dddf..2ab1d78b 100644 --- a/phase/download_binaries.go +++ b/phase/download_binaries.go @@ -29,7 +29,7 @@ func (p *DownloadBinaries) Title() string { func (p *DownloadBinaries) Prepare(config *v1beta1.Cluster) error { p.Config = config p.hosts = 
p.Config.Spec.Hosts.Filter(func(h *cluster.Host) bool { - return h.UploadBinary && h.Metadata.K0sBinaryVersion != config.Spec.K0s.Version + return !h.Reset && h.UploadBinary && h.Metadata.K0sBinaryVersion != config.Spec.K0s.Version }) return nil } diff --git a/phase/download_k0s.go b/phase/download_k0s.go index cf498293..b743eb6d 100644 --- a/phase/download_k0s.go +++ b/phase/download_k0s.go @@ -24,15 +24,23 @@ func (p *DownloadK0s) Title() string { func (p *DownloadK0s) Prepare(config *v1beta1.Cluster) error { p.Config = config p.hosts = p.Config.Spec.Hosts.Filter(func(h *cluster.Host) bool { - if h.Metadata.K0sBinaryVersion == p.Config.Spec.K0s.Version { + // Nothing to upload + if h.UploadBinary { return false } + // Nothing to upload + if h.Reset { + return false + } + + // Upgrade is handled separately (k0s stopped, binary uploaded, k0s restarted) if h.Metadata.NeedsUpgrade { return false } - if h.UploadBinary { + // The version is already correct + if h.Metadata.K0sBinaryVersion == p.Config.Spec.K0s.Version { return false } diff --git a/phase/install_controllers.go b/phase/install_controllers.go index cd9d84d6..03b2e40c 100644 --- a/phase/install_controllers.go +++ b/phase/install_controllers.go @@ -27,7 +27,7 @@ func (p *InstallControllers) Prepare(config *v1beta1.Cluster) error { var controllers cluster.Hosts = p.Config.Spec.Hosts.Controllers() p.leader = p.Config.Spec.K0sLeader() p.hosts = controllers.Filter(func(h *cluster.Host) bool { - return h != p.leader && h.Metadata.K0sRunningVersion == "" + return !h.Reset && (h != p.leader && h.Metadata.K0sRunningVersion == "") }) return nil diff --git a/phase/install_workers.go b/phase/install_workers.go index 168e9ac5..ab7b018c 100644 --- a/phase/install_workers.go +++ b/phase/install_workers.go @@ -27,7 +27,7 @@ func (p *InstallWorkers) Prepare(config *v1beta1.Cluster) error { p.Config = config var workers cluster.Hosts = p.Config.Spec.Hosts.Workers() p.hosts = workers.Filter(func(h *cluster.Host) bool { - 
return h.Metadata.K0sRunningVersion == "" || !h.Metadata.Ready + return !h.Reset && (h.Metadata.K0sRunningVersion == "" || !h.Metadata.Ready) }) p.leader = p.Config.Spec.K0sLeader() diff --git a/phase/reset.go b/phase/reset.go deleted file mode 100644 index c75e429f..00000000 --- a/phase/reset.go +++ /dev/null @@ -1,86 +0,0 @@ -package phase - -import ( - "fmt" - "strings" - - "github.com/Masterminds/semver" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" - "github.com/k0sproject/rig/exec" - log "github.com/sirupsen/logrus" -) - -// Reset uninstalls k0s from the hosts -type Reset struct { - GenericPhase - hosts cluster.Hosts -} - -// Title for the phase -func (p *Reset) Title() string { - return "Reset hosts" -} - -// Prepare the phase -func (p *Reset) Prepare(config *v1beta1.Cluster) error { - p.Config = config - var hosts cluster.Hosts = p.Config.Spec.Hosts.Filter(func(h *cluster.Host) bool { - return h.Metadata.K0sBinaryVersion != "" - }) - c, _ := semver.NewConstraint("< 0.11.0-rc1") - - for _, h := range hosts { - running, err := semver.NewVersion(h.Metadata.K0sBinaryVersion) - if err != nil { - return err - } - - if c.Check(running) { - return fmt.Errorf("reset is only supported on k0s >= 0.11.0-rc1") - } - } - - p.hosts = hosts - - return nil -} - -// Run the phase -func (p *Reset) Run() error { - return p.hosts.ParallelEach(func(h *cluster.Host) error { - log.Infof("%s: cleaning up service environment", h) - if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil { - return err - } - - if h.Configurer.ServiceIsRunning(h, h.K0sServiceName()) { - log.Infof("%s: stopping k0s", h) - if err := h.Configurer.StopService(h, h.K0sServiceName()); err != nil { - return err - } - log.Infof("%s: waiting for k0s to stop", h) - if err := h.WaitK0sServiceStopped(); err != nil { - return err - } - } - - log.Infof("%s: running k0s reset", h) - 
out, err := h.ExecOutput(h.Configurer.K0sCmdf("reset"), exec.Sudo(h)) - c, _ := semver.NewConstraint("<= 1.22.3+k0s.0") - running, _ := semver.NewVersion(h.Metadata.K0sBinaryVersion) - - if dErr := h.Configurer.DeleteFile(h, h.Configurer.K0sConfigPath()); dErr != nil { - log.Warnf("%s: failed to remove existing configuration %s: %s", h, h.Configurer.K0sConfigPath(), dErr) - } - - if err != nil { - log.Warnf("%s: k0s reported failure: %v", h, err) - if c.Check(running) && strings.Contains(out, "k0s cleanup operations done") { - return nil - } - } - - return err - }) -} diff --git a/phase/reset_controllers.go b/phase/reset_controllers.go new file mode 100644 index 00000000..fd4a80b1 --- /dev/null +++ b/phase/reset_controllers.go @@ -0,0 +1,129 @@ +package phase + +import ( + "strings" + + "github.com/Masterminds/semver" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" + "github.com/k0sproject/rig/exec" + log "github.com/sirupsen/logrus" +) + +// ResetControllers phase removes controllers marked for reset from the kubernetes and etcd clusters +// and resets k0s on the host +type ResetControllers struct { + GenericPhase + + NoDrain bool + NoDelete bool + NoLeave bool + + hosts cluster.Hosts + leader *cluster.Host +} + +// Title for the phase +func (p *ResetControllers) Title() string { + return "Reset controllers" +} + +// Prepare the phase +func (p *ResetControllers) Prepare(config *v1beta1.Cluster) error { + p.Config = config + p.leader = p.Config.Spec.K0sLeader() + + var controllers cluster.Hosts = p.Config.Spec.Hosts.Controllers() + log.Debugf("%d controllers in total", len(controllers)) + p.hosts = controllers.Filter(func(h *cluster.Host) bool { + return h.Reset + }) + log.Debugf("ResetControllers phase prepared, %d controllers will be reset", len(p.hosts)) + return nil +} + +// ShouldRun is true when there are controllers that needs to be reset +func (p 
*ResetControllers) ShouldRun() bool { + return len(p.hosts) > 0 +} + +// CleanUp cleans up the environment override files on hosts +func (p *ResetControllers) CleanUp() { + for _, h := range p.hosts { + if len(h.Environment) > 0 { + if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil { + log.Warnf("%s: failed to clean up service environment: %s", h, err.Error()) + } + } + } +} + +// Run the phase +func (p *ResetControllers) Run() error { + for _, h := range p.hosts { + log.Debugf("%s: draining node", h) + if !p.NoDrain && h.Role != "controller" { + if err := p.leader.DrainNode(&cluster.Host{ + Metadata: cluster.HostMetadata{ + Hostname: h.Metadata.Hostname, + }, + }); err != nil { + log.Warnf("%s: failed to drain node: %s", h, err.Error()) + } + } + log.Debugf("%s: draining node completed", h) + + log.Debugf("%s: deleting node...", h) + if !p.NoDelete && h.Role != "controller" { + if err := p.leader.DeleteNode(&cluster.Host{ + Metadata: cluster.HostMetadata{ + Hostname: h.Metadata.Hostname, + }, + }); err != nil { + log.Warnf("%s: failed to delete node: %s", h, err.Error()) + } + } + log.Debugf("%s: deleting node", h) + + if h.Configurer.ServiceIsRunning(h, h.K0sServiceName()) { + log.Debugf("%s: stopping k0s...", h) + if err := h.Configurer.StopService(h, h.K0sServiceName()); err != nil { + log.Warnf("%s: failed to stop k0s: %s", h, err.Error()) + } + log.Debugf("%s: waiting for k0s to stop", h) + if err := h.WaitK0sServiceStopped(); err != nil { + log.Warnf("%s: failed to wait for k0s to stop: %s", h, err.Error()) + } + log.Debugf("%s: stopping k0s completed", h) + } + + log.Debugf("%s: leaving etcd...", h) + if !p.NoLeave { + if err := p.leader.LeaveEtcd(h); err != nil { + log.Warnf("%s: failed to leave etcd: %s", h, err.Error()) + } + } + log.Debugf("%s: leaving etcd completed", h) + + log.Debugf("%s: resetting k0s...", h) + out, err := h.ExecOutput(h.Configurer.K0sCmdf("reset"), exec.Sudo(h)) + c, _ := semver.NewConstraint("<= 
1.22.3+k0s.0") + running, _ := semver.NewVersion(h.Metadata.K0sBinaryVersion) + if err != nil { + log.Warnf("%s: k0s reported failure: %v", h, err) + if c.Check(running) && !strings.Contains(out, "k0s cleanup operations done") { + log.Warnf("%s: k0s reset failed, trying k0s cleanup", h) + } + } + log.Debugf("%s: resetting k0s completed", h) + + log.Debugf("%s: removing config...", h) + if dErr := h.Configurer.DeleteFile(h, h.Configurer.K0sConfigPath()); dErr != nil { + log.Warnf("%s: failed to remove existing configuration %s: %s", h, h.Configurer.K0sConfigPath(), dErr) + } + log.Debugf("%s: removing config completed", h) + + log.Infof("%s: reset", h) + } + return nil +} diff --git a/phase/reset_leader.go b/phase/reset_leader.go new file mode 100644 index 00000000..b2901bdf --- /dev/null +++ b/phase/reset_leader.go @@ -0,0 +1,75 @@ +package phase + +import ( + "strings" + + "github.com/Masterminds/semver" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" + "github.com/k0sproject/rig/exec" + log "github.com/sirupsen/logrus" +) + +// ResetLeader phase removes the leader from the cluster and thus destroys the cluster +type ResetLeader struct { + GenericPhase + leader *cluster.Host +} + +// Title for the phase +func (p *ResetLeader) Title() string { + return "Reset leader" +} + +// Prepare the phase +func (p *ResetLeader) Prepare(config *v1beta1.Cluster) error { + p.Config = config + p.leader = p.Config.Spec.K0sLeader() + return nil +} + +// CleanUp cleans up the environment override files on hosts +func (p *ResetLeader) CleanUp() { + if len(p.leader.Environment) > 0 { + if err := p.leader.Configurer.CleanupServiceEnvironment(p.leader, p.leader.K0sServiceName()); err != nil { + log.Warnf("%s: failed to clean up service environment: %s", p.leader, err.Error()) + } + } +} + +// Run the phase +func (p *ResetLeader) Run() error { + if 
p.leader.Configurer.ServiceIsRunning(p.leader, p.leader.K0sServiceName()) { + log.Debugf("%s: stopping k0s...", p.leader) + if err := p.leader.Configurer.StopService(p.leader, p.leader.K0sServiceName()); err != nil { + log.Warnf("%s: failed to stop k0s: %s", p.leader, err.Error()) + } + log.Debugf("%s: waiting for k0s to stop", p.leader) + if err := p.leader.WaitK0sServiceStopped(); err != nil { + log.Warnf("%s: failed to wait for k0s to stop: %s", p.leader, err.Error()) + } + log.Debugf("%s: stopping k0s completed", p.leader) + } + + log.Debugf("%s: resetting k0s...", p.leader) + out, err := p.leader.ExecOutput(p.leader.Configurer.K0sCmdf("reset"), exec.Sudo(p.leader)) + c, _ := semver.NewConstraint("<= 1.22.3+k0s.0") + running, _ := semver.NewVersion(p.leader.Metadata.K0sBinaryVersion) + if err != nil { + log.Warnf("%s: k0s reported failure: %v", p.leader, err) + if c.Check(running) && !strings.Contains(out, "k0s cleanup operations done") { + log.Warnf("%s: k0s reset failed, trying k0s cleanup", p.leader) + } + } + log.Debugf("%s: resetting k0s completed", p.leader) + + log.Debugf("%s: removing config...", p.leader) + if dErr := p.leader.Configurer.DeleteFile(p.leader, p.leader.Configurer.K0sConfigPath()); dErr != nil { + log.Warnf("%s: failed to remove existing configuration %s: %s", p.leader, p.leader.Configurer.K0sConfigPath(), dErr) + } + log.Debugf("%s: removing config completed", p.leader) + + log.Infof("%s: reset", p.leader) + + return nil +} diff --git a/phase/reset_workers.go b/phase/reset_workers.go new file mode 100644 index 00000000..b0ca64f1 --- /dev/null +++ b/phase/reset_workers.go @@ -0,0 +1,120 @@ +package phase + +import ( + "strings" + + "github.com/Masterminds/semver" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" + "github.com/k0sproject/rig/exec" + log "github.com/sirupsen/logrus" +) + +// ResetWorkers phase removes workers marked for 
reset from the kubernetes cluster +// and resets k0s on the host +type ResetWorkers struct { + GenericPhase + + NoDrain bool + NoDelete bool + + hosts cluster.Hosts + leader *cluster.Host +} + +// Title for the phase +func (p *ResetWorkers) Title() string { + return "Reset workers" +} + +// Prepare the phase +func (p *ResetWorkers) Prepare(config *v1beta1.Cluster) error { + p.Config = config + p.leader = p.Config.Spec.K0sLeader() + + var workers cluster.Hosts = p.Config.Spec.Hosts.Workers() + log.Debugf("%d workers in total", len(workers)) + p.hosts = workers.Filter(func(h *cluster.Host) bool { + return h.Reset + }) + log.Debugf("ResetWorkers phase prepared, %d workers will be reset", len(p.hosts)) + return nil +} + +// ShouldRun is true when there are workers that needs to be reset +func (p *ResetWorkers) ShouldRun() bool { + return len(p.hosts) > 0 +} + +// CleanUp cleans up the environment override files on hosts +func (p *ResetWorkers) CleanUp() { + for _, h := range p.hosts { + if len(h.Environment) > 0 { + if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil { + log.Warnf("%s: failed to clean up service environment: %s", h, err.Error()) + } + } + } +} + +// Run the phase +func (p *ResetWorkers) Run() error { + return p.hosts.ParallelEach(func(h *cluster.Host) error { + log.Debugf("%s: draining node", h) + if !p.NoDrain { + if err := p.leader.DrainNode(&cluster.Host{ + Metadata: cluster.HostMetadata{ + Hostname: h.Metadata.Hostname, + }, + }); err != nil { + log.Warnf("%s: failed to drain node: %s", h, err.Error()) + } + } + log.Debugf("%s: draining node completed", h) + + log.Debugf("%s: deleting node...", h) + if !p.NoDelete { + if err := p.leader.DeleteNode(&cluster.Host{ + Metadata: cluster.HostMetadata{ + Hostname: h.Metadata.Hostname, + }, + }); err != nil { + log.Warnf("%s: failed to delete node: %s", h, err.Error()) + } + } + log.Debugf("%s: deleting node", h) + + if h.Configurer.ServiceIsRunning(h, h.K0sServiceName()) { 
+ log.Debugf("%s: stopping k0s...", h) + if err := h.Configurer.StopService(h, h.K0sServiceName()); err != nil { + log.Warnf("%s: failed to stop k0s: %s", h, err.Error()) + } + log.Debugf("%s: waiting for k0s to stop", h) + if err := h.WaitK0sServiceStopped(); err != nil { + log.Warnf("%s: failed to wait for k0s to stop: %s", h, err.Error()) + } + log.Debugf("%s: stopping k0s completed", h) + } + + log.Debugf("%s: resetting k0s...", h) + out, err := h.ExecOutput(h.Configurer.K0sCmdf("reset"), exec.Sudo(h)) + c, _ := semver.NewConstraint("<= 1.22.3+k0s.0") + running, _ := semver.NewVersion(h.Metadata.K0sBinaryVersion) + if err != nil { + log.Warnf("%s: k0s reported failure: %v", h, err) + if c.Check(running) && !strings.Contains(out, "k0s cleanup operations done") { + log.Warnf("%s: k0s reset failed, trying k0s cleanup", h) + } + } + log.Debugf("%s: resetting k0s completed", h) + + log.Debugf("%s: removing config...", h) + if dErr := h.Configurer.DeleteFile(h, h.Configurer.K0sConfigPath()); dErr != nil { + log.Warnf("%s: failed to remove existing configuration %s: %s", h, h.Configurer.K0sConfigPath(), dErr) + } + log.Debugf("%s: removing config completed", h) + + log.Infof("%s: reset", h) + return err + }) +} diff --git a/phase/upgrade_controllers.go b/phase/upgrade_controllers.go index 6d6d6b2a..abe3ad89 100644 --- a/phase/upgrade_controllers.go +++ b/phase/upgrade_controllers.go @@ -30,7 +30,7 @@ func (p *UpgradeControllers) Prepare(config *v1beta1.Cluster) error { var controllers cluster.Hosts = p.Config.Spec.Hosts.Controllers() log.Debugf("%d controllers in total", len(controllers)) p.hosts = controllers.Filter(func(h *cluster.Host) bool { - return h.Metadata.NeedsUpgrade + return !h.Reset && h.Metadata.NeedsUpgrade }) log.Debugf("UpgradeControllers phase prepared, %d controllers needs upgrade", len(p.hosts)) return nil diff --git a/phase/upgrade_workers.go b/phase/upgrade_workers.go index 280c8d00..6ef1d895 100644 --- a/phase/upgrade_workers.go +++ 
b/phase/upgrade_workers.go @@ -31,9 +31,9 @@ func (p *UpgradeWorkers) Prepare(config *v1beta1.Cluster) error { p.Config = config p.leader = p.Config.Spec.K0sLeader() var workers cluster.Hosts = p.Config.Spec.Hosts.Workers() - log.Debugf("%d controllers in total", len(workers)) + log.Debugf("%d workers in total", len(workers)) p.hosts = workers.Filter(func(h *cluster.Host) bool { - return h.Metadata.NeedsUpgrade + return !h.Reset && h.Metadata.NeedsUpgrade }) log.Debugf("UpgradeWorkers phase prepared, %d workers needs upgrade", len(p.hosts)) diff --git a/phase/upload_binaries.go b/phase/upload_binaries.go index d3d375ed..b74fe228 100644 --- a/phase/upload_binaries.go +++ b/phase/upload_binaries.go @@ -31,6 +31,11 @@ func (p *UploadBinaries) Prepare(config *v1beta1.Cluster) error { return false } + // Nothing to upload + if h.Reset { + return false + } + // Upgrade is handled separately (k0s stopped, binary uploaded, k0s restarted) if h.Metadata.NeedsUpgrade { return false diff --git a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go index cec9c7c3..995b58ce 100644 --- a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go +++ b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go @@ -27,6 +27,7 @@ type Host struct { rig.Connection `yaml:",inline"` Role string `yaml:"role"` + Reset bool `yaml:"reset,omitempty"` PrivateInterface string `yaml:"privateInterface,omitempty"` PrivateAddress string `yaml:"privateAddress,omitempty"` Environment map[string]string `yaml:"environment,flow,omitempty"` @@ -403,6 +404,19 @@ func (h *Host) UncordonNode(node *Host) error { return h.Exec(h.Configurer.KubectlCmdf("uncordon %s", node.Metadata.Hostname), exec.Sudo(h)) } +// DeleteNode deletes the given node from kubernetes +func (h *Host) DeleteNode(node *Host) error { + return h.Exec(h.Configurer.KubectlCmdf("delete node %s", node.Metadata.Hostname), exec.Sudo(h)) +} + +func (h *Host) LeaveEtcd(node *Host) error { + 
etcdAddress := node.SSH.Address + if node.PrivateAddress != "" { + etcdAddress = node.PrivateAddress + } + return h.Exec(h.Configurer.K0sCmdf("etcd leave --peer-address %s", etcdAddress), exec.Sudo(h)) +} + // CheckHTTPStatus will perform a web request to the url and return an error if the http status is not the expected func (h *Host) CheckHTTPStatus(url string, expected ...int) error { status, err := h.Configurer.HTTPStatus(h, url) diff --git a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec.go b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec.go index 19f19f67..08740ddc 100644 --- a/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec.go +++ b/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec.go @@ -36,7 +36,7 @@ func (s *Spec) K0sLeader() *Host { // Pick the first controller that reports to be running and persist the choice for _, h := range controllers { - if h.Metadata.K0sBinaryVersion != "" && h.Metadata.K0sRunningVersion != "" { + if !h.Reset && h.Metadata.K0sBinaryVersion != "" && h.Metadata.K0sRunningVersion != "" { s.k0sLeader = h break }