Remove and reset nodes during apply by setting reset: true (#417)
* Add Uninstall phases

* Add docs for uninstall field

* Don't uninstall controllers in parallel

* Add back accidentally removed code

* Fix comments mentioning upgrade

* Make informational message more precise

* Rename uninstall to reset

* Remove notice about node removal limitation

* Update README docs
0SkillAllLuck authored Sep 27, 2022
1 parent 0cbfc47 commit 5eae4c5
Showing 17 changed files with 395 additions and 98 deletions.
5 changes: 4 additions & 1 deletion README.md
@@ -113,7 +113,6 @@ K0sctl is ready for use and in continuous development. It is still at a stage wh
Missing major features include at least:

* The released binaries have not been signed
* Nodes can't be removed
* The configuration specification and command-line interface options are still evolving

## Usage
@@ -481,6 +480,10 @@ Localhost connection options. Can be used to use the local host running k0sctl a

This must be set `true` to enable the localhost connection.

###### `spec.hosts[*].reset` <boolean> (optional) (default: `false`)

If set to `true`, k0sctl will remove the node from Kubernetes and reset k0s on the host.
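
A minimal sketch of a cluster configuration that flags one host for removal on the next `k0sctl apply` might look like the following (the addresses and the other fields are placeholder values; only `reset` is the field documented here):

```yaml
apiVersion: k0sctl.k0sproject.io/v1beta1
kind: Cluster
spec:
  hosts:
    - role: controller
      ssh:
        address: 10.0.0.1   # placeholder address
    - role: worker
      reset: true           # this host is removed from the cluster and k0s is reset on it
      ssh:
        address: 10.0.0.2   # placeholder address
```

After the apply finishes, the host should also be removed from the configuration file, as noted in the informational message printed by `k0sctl apply`.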

### K0s Fields

##### `spec.k0s.version` <string> (optional) (default: auto-discovery)
16 changes: 16 additions & 0 deletions cmd/apply.go
@@ -86,6 +86,12 @@ var applyCommand = &cli.Command{
&phase.UpgradeWorkers{
NoDrain: ctx.Bool("no-drain"),
},
&phase.ResetWorkers{
NoDrain: ctx.Bool("no-drain"),
},
&phase.ResetControllers{
NoDrain: ctx.Bool("no-drain"),
},
&phase.RunHooks{Stage: "after", Action: "apply"},
)

@@ -128,6 +134,16 @@ var applyCommand = &cli.Command{
text := fmt.Sprintf("==> Finished in %s", duration)
log.Infof(Colorize.Green(text).String())

uninstalled := false
for _, host := range manager.Config.Spec.Hosts {
if host.Reset {
uninstalled = true
}
}
if uninstalled {
log.Info("There were nodes that got uninstalled during the apply phase. Please remove them from your k0sctl config file")
}

log.Infof("k0s cluster version %s is now installed", manager.Config.Spec.K0s.Version)
log.Infof("Tip: To access the cluster you can now fetch the admin kubeconfig using:")
log.Infof(" " + Colorize.Cyan("k0sctl kubeconfig").String())
14 changes: 13 additions & 1 deletion cmd/reset.go
@@ -51,6 +51,9 @@ var resetCommand = &cli.Command{
start := time.Now()

manager := phase.Manager{Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster)}
for _, h := range manager.Config.Spec.Hosts {
h.Reset = true
}

lockPhase := &phase.Lock{}
manager.AddPhase(
@@ -60,7 +63,16 @@ var resetCommand = &cli.Command{
&phase.PrepareHosts{},
&phase.GatherK0sFacts{},
&phase.RunHooks{Stage: "before", Action: "reset"},
&phase.Reset{},
&phase.ResetWorkers{
NoDrain: true,
NoDelete: true,
},
&phase.ResetControllers{
NoDrain: true,
NoDelete: true,
NoLeave: true,
},
&phase.ResetLeader{},
&phase.RunHooks{Stage: "after", Action: "reset"},
&phase.Unlock{Cancel: lockPhase.Cancel},
&phase.Disconnect{},
3 changes: 2 additions & 1 deletion configurer/linux/enterpriselinux/rhel.go
@@ -1,11 +1,12 @@
package enterpriselinux

import (
"strings"

"github.com/k0sproject/k0sctl/configurer"
k0slinux "github.com/k0sproject/k0sctl/configurer/linux"
"github.com/k0sproject/rig"
"github.com/k0sproject/rig/os/registry"
"strings"
)

// RHEL provides OS support for RedHat Enterprise Linux
2 changes: 1 addition & 1 deletion phase/download_binaries.go
@@ -29,7 +29,7 @@ func (p *DownloadBinaries) Title() string {
func (p *DownloadBinaries) Prepare(config *v1beta1.Cluster) error {
p.Config = config
p.hosts = p.Config.Spec.Hosts.Filter(func(h *cluster.Host) bool {
return h.UploadBinary && h.Metadata.K0sBinaryVersion != config.Spec.K0s.Version
return !h.Reset && h.UploadBinary && h.Metadata.K0sBinaryVersion != config.Spec.K0s.Version
})
return nil
}
12 changes: 10 additions & 2 deletions phase/download_k0s.go
@@ -24,15 +24,23 @@ func (p *DownloadK0s) Title() string {
func (p *DownloadK0s) Prepare(config *v1beta1.Cluster) error {
p.Config = config
p.hosts = p.Config.Spec.Hosts.Filter(func(h *cluster.Host) bool {
if h.Metadata.K0sBinaryVersion == p.Config.Spec.K0s.Version {
// Nothing to upload
if h.UploadBinary {
return false
}

// Nothing to upload
if h.Reset {
return false
}

// Upgrade is handled separately (k0s stopped, binary uploaded, k0s restarted)
if h.Metadata.NeedsUpgrade {
return false
}

if h.UploadBinary {
// The version is already correct
if h.Metadata.K0sBinaryVersion == p.Config.Spec.K0s.Version {
return false
}

2 changes: 1 addition & 1 deletion phase/install_controllers.go
@@ -27,7 +27,7 @@ func (p *InstallControllers) Prepare(config *v1beta1.Cluster) error {
var controllers cluster.Hosts = p.Config.Spec.Hosts.Controllers()
p.leader = p.Config.Spec.K0sLeader()
p.hosts = controllers.Filter(func(h *cluster.Host) bool {
return h != p.leader && h.Metadata.K0sRunningVersion == ""
return !h.Reset && (h != p.leader && h.Metadata.K0sRunningVersion == "")
})

return nil
2 changes: 1 addition & 1 deletion phase/install_workers.go
@@ -27,7 +27,7 @@ func (p *InstallWorkers) Prepare(config *v1beta1.Cluster) error {
p.Config = config
var workers cluster.Hosts = p.Config.Spec.Hosts.Workers()
p.hosts = workers.Filter(func(h *cluster.Host) bool {
return h.Metadata.K0sRunningVersion == "" || !h.Metadata.Ready
return !h.Reset && (h.Metadata.K0sRunningVersion == "" || !h.Metadata.Ready)
})
p.leader = p.Config.Spec.K0sLeader()

86 changes: 0 additions & 86 deletions phase/reset.go

This file was deleted.

129 changes: 129 additions & 0 deletions phase/reset_controllers.go
@@ -0,0 +1,129 @@
package phase

import (
"strings"

"github.com/Masterminds/semver"
"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
"github.com/k0sproject/rig/exec"
log "github.com/sirupsen/logrus"
)

// ResetControllers phase removes controllers marked for reset from the kubernetes and etcd clusters
// and resets k0s on the host
type ResetControllers struct {
GenericPhase

NoDrain bool
NoDelete bool
NoLeave bool

hosts cluster.Hosts
leader *cluster.Host
}

// Title for the phase
func (p *ResetControllers) Title() string {
return "Reset controllers"
}

// Prepare the phase
func (p *ResetControllers) Prepare(config *v1beta1.Cluster) error {
p.Config = config
p.leader = p.Config.Spec.K0sLeader()

var controllers cluster.Hosts = p.Config.Spec.Hosts.Controllers()
log.Debugf("%d controllers in total", len(controllers))
p.hosts = controllers.Filter(func(h *cluster.Host) bool {
return h.Reset
})
log.Debugf("ResetControllers phase prepared, %d controllers will be reset", len(p.hosts))
return nil
}

// ShouldRun is true when there are controllers that need to be reset
func (p *ResetControllers) ShouldRun() bool {
return len(p.hosts) > 0
}

// CleanUp cleans up the environment override files on hosts
func (p *ResetControllers) CleanUp() {
for _, h := range p.hosts {
if len(h.Environment) > 0 {
if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil {
log.Warnf("%s: failed to clean up service environment: %s", h, err.Error())
}
}
}
}

// Run the phase
func (p *ResetControllers) Run() error {
for _, h := range p.hosts {
log.Debugf("%s: draining node", h)
if !p.NoDrain && h.Role != "controller" {
if err := p.leader.DrainNode(&cluster.Host{
Metadata: cluster.HostMetadata{
Hostname: h.Metadata.Hostname,
},
}); err != nil {
log.Warnf("%s: failed to drain node: %s", h, err.Error())
}
}
log.Debugf("%s: draining node completed", h)

log.Debugf("%s: deleting node...", h)
if !p.NoDelete && h.Role != "controller" {
if err := p.leader.DeleteNode(&cluster.Host{
Metadata: cluster.HostMetadata{
Hostname: h.Metadata.Hostname,
},
}); err != nil {
log.Warnf("%s: failed to delete node: %s", h, err.Error())
}
}
log.Debugf("%s: deleting node", h)

if h.Configurer.ServiceIsRunning(h, h.K0sServiceName()) {
log.Debugf("%s: stopping k0s...", h)
if err := h.Configurer.StopService(h, h.K0sServiceName()); err != nil {
log.Warnf("%s: failed to stop k0s: %s", h, err.Error())
}
log.Debugf("%s: waiting for k0s to stop", h)
if err := h.WaitK0sServiceStopped(); err != nil {
log.Warnf("%s: failed to wait for k0s to stop: %s", h, err.Error())
}
log.Debugf("%s: stopping k0s completed", h)
}

log.Debugf("%s: leaving etcd...", h)
if !p.NoLeave {
if err := p.leader.LeaveEtcd(h); err != nil {
log.Warnf("%s: failed to leave etcd: %s", h, err.Error())
}
}
log.Debugf("%s: leaving etcd completed", h)

log.Debugf("%s: resetting k0s...", h)
out, err := h.ExecOutput(h.Configurer.K0sCmdf("reset"), exec.Sudo(h))
c, _ := semver.NewConstraint("<= 1.22.3+k0s.0")
running, _ := semver.NewVersion(h.Metadata.K0sBinaryVersion)
if err != nil {
log.Warnf("%s: k0s reported failure: %v", h, err)
if c.Check(running) && !strings.Contains(out, "k0s cleanup operations done") {
log.Warnf("%s: k0s reset failed, trying k0s cleanup", h)
}
}
log.Debugf("%s: resetting k0s completed", h)

log.Debugf("%s: removing config...", h)
if dErr := h.Configurer.DeleteFile(h, h.Configurer.K0sConfigPath()); dErr != nil {
log.Warnf("%s: failed to remove existing configuration %s: %s", h, h.Configurer.K0sConfigPath(), dErr)
}
log.Debugf("%s: removing config completed", h)

log.Infof("%s: reset", h)
}
return nil
}