From 5b2f503c4dbe75b7c52485390c4e60f9e441e2a2 Mon Sep 17 00:00:00 2001 From: "Abhijith D.A" Date: Sun, 6 Jun 2021 15:02:40 -0700 Subject: [PATCH 01/10] Update go.yml --- .github/workflows/go.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 64a7527..0b9ce2a 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -2,9 +2,9 @@ name: Go on: push: - branches: [ main ] + branches: [ main v1] pull_request: - branches: [ main ] + branches: [ main v1] jobs: From b714bd4eb3d81a1c5c1b08e9ac870f1aa5b2c0a5 Mon Sep 17 00:00:00 2001 From: "Abhijith D.A" Date: Sat, 1 May 2021 09:01:47 -0700 Subject: [PATCH 02/10] Fix paths and add update workflow images in docs. --- Makefile | 69 +++++++++++++++++++++--- sample/update/README.md | 4 +- sample/update/imgs/commit-precheck.svg | 25 +++++++++ sample/update/imgs/commit.svg | 25 +++++++++ sample/update/imgs/install.svg | 25 +++++++++ sample/update/imgs/postreboot.svg | 13 +++++ sample/update/imgs/preinstall.svg | 43 +++++++++++++++ sample/update/imgs/prereboot.svg | 13 +++++ sample/update/imgs/prerollback.svg | 13 +++++ sample/update/imgs/rollback-precheck.svg | 25 +++++++++ sample/update/imgs/rollback.svg | 25 +++++++++ sdk/Makefile | 2 +- sdk/template.md | 4 +- 13 files changed, 275 insertions(+), 11 deletions(-) create mode 100644 sample/update/imgs/commit-precheck.svg create mode 100644 sample/update/imgs/commit.svg create mode 100644 sample/update/imgs/install.svg create mode 100644 sample/update/imgs/postreboot.svg create mode 100644 sample/update/imgs/preinstall.svg create mode 100644 sample/update/imgs/prereboot.svg create mode 100644 sample/update/imgs/prerollback.svg create mode 100644 sample/update/imgs/rollback-precheck.svg create mode 100644 sample/update/imgs/rollback.svg diff --git a/Makefile b/Makefile index d8180b3..d0e439b 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,7 @@ help: ## Display this help message. 
TOP=$(CURDIR) BUILDDATE ?= $(shell /usr/bin/date -u +%Y%m%d%H%M%S) +TAR = /bin/tar # Go build related variables GOSRC=$(TOP) @@ -24,6 +25,8 @@ SDK_VERSION = 2.0 SDK_REVERSION = 1 SDK_SOURCE_PATH = $(TOP)/sdk +TAR = /bin/tar + .SILENT: .PHONY: all @@ -131,17 +134,71 @@ go-race: ## Run Go tests with race detector enabled exit 1; \ fi; -.PHONY: ship-asum-sdk -ship-asum-sdk: .copy_update_binaries - @echo "Creating ASUM SDK tar..." +SUM_PATH = $(TOP)/ +SAMPLE_UPDATE= $(SUM_PATH)/sample/update/ +PRODUCT_VERSION = 1.0 +tmp_SUM_SDK = $(TOP)/tmp/sum-sdk +tmp_SHIP_DIR = $(TOP)/tmp/ship + +# Ship SDK and create sample update RPM +SUM_PATH = $(TOP)/ +SAMPLE_UPDATE = $(SUM_PATH)/sample/update +PRODUCT_VERSION = 1.0 +tmp_SUM_SDK = $(TOP)/tmp/sum-sdk +tmp_SHIP_DIR = $(TOP)/tmp/ship + +.PHONY: ship-sum-sdk +ship-sum-sdk: .copy_update_binaries + @echo "Creating SUM SDK tar..." # cp $(TOP)/tools/mkrpm.sh $(SDK_SOURCE_PATH)/; ship_dir=$(TOP); \ if [ -n "$(SHIP_DIR)" ]; then \ ship_dir=$(SHIP_DIR); \ mkdir -p $${ship_dir} || exit 1; \ fi; \ - asum_sdk_tar="$${ship_dir}/$(SDK_NAME)-$(SDK_VERSION)-$(SDK_REVERSION)-$(BUILDDATE).tar.gz"; \ - /bin/tar -czf $${asum_sdk_tar} -C $(SDK_SOURCE_PATH) .; \ - echo "Successfully created ASUM SDK at $${asum_sdk_tar}."; + sum_sdk_tar="$${ship_dir}/$(SDK_NAME)-$(SDK_VERSION)-$(SDK_REVERSION)-$(BUILDDATE).tar.gz"; \ + /bin/tar -czf $${sum_sdk_tar} -C $(SDK_SOURCE_PATH) .; \ + echo "Successfully created ASUM SDK at $${sum_sdk_tar}."; + +.PHONY: getsumsdk +getsumsdk: + # NOTE: Usually the SUM SDK will be extracted from an already built tar. + # But here, in this case, we need to first build the + # SUM SDK tar and then consume it here. 
+ echo "=============== Get SUM SDK ==============="; + ship_dir=$(tmp_SHIP_DIR); \ + rm -rf $${ship_dir}; \ + $(MAKE) -C $(SUM_PATH) ship-sum-sdk SHIP_DIR=$${ship_dir}; \ + echo "=============== extracting sum-sdk tar file =========================="; \ + mkdir -p $(tmp_SUM_SDK); \ + $(TAR) -xzvf $${ship_dir}/sum-sdk-*.tar.gz -C $(tmp_SUM_SDK); \ + if [ $$? -ne 0 ]; then \ + echo "ERROR: Failed to extract sum-sdk to $(tmp_SUM_SDK)"; \ + exit 1; \ + fi; + +.PHONY: sampleupdate +sampleupdate: getsumsdk + echo "=============== Creating a sample update RPM ==============="; + git checkout -- $(TOP)/sample/update/library/version/version.install $(TOP)/sample/update/rpm-info.json + myVersion=$(PRODUCT_VERSION).9; \ + sed -i -e "s%__VERSION__%$${myVersion}%g" $(TOP)/sample/update/library/version/version.install; \ + sed -i -e "s%__PRODUCT_VERSION__%$(PRODUCT_VERSION)%g" $(TOP)/sample/update/rpm-info.json; \ + JenkinsOptions=""; \ + ship_dir=$(tmp_SHIP_DIR); \ + if [ -n "$(JENKINS_URL)" ]; then \ + JenkinsOptions="BUILDDATE=$${BUILDTAG##*-}"; \ + ship_dir=$(JENKINS_UPLOAD_DEST); \ + fi; \ + $(MAKE) -C $(SAMPLE_UPDATE) update \ + ASUM_SDK_PATH=$(tmp_SUM_SDK) \ + SHIP_DIR=$${ship_dir} \ + UPDATE_VERSION=$${myVersion} \ + $${JenkinsOptions}; + git checkout -- $(TOP)/sample/update/library/version/version.install $(TOP)/sample/update/rpm-info.json + +.PHONY: clean-update +clean-update: + $(RM) -rf $(tmp_SUM_SDK) .NOTPARALLEL: diff --git a/sample/update/README.md b/sample/update/README.md index d5b3b80..fe13bdf 100644 --- a/sample/update/README.md +++ b/sample/update/README.md @@ -1,8 +1,8 @@ # Update plugins' and its dependencies -The document lists the plugins that are bundled into the update RPM using [ĀSUM SDK](https://github.com/VeritasOS/software-update-manager/blob/v1.0/sdk/README.md). +The document lists the plugins that are bundled into the update RPM using [ĀSUM SDK](https://stash.veritas.com/projects/AS/repos/platformx/browse/asum/sdk/README.md). 
-The update framework workflow details can be found in here: [ĀSUM update framework](https://github.com/VeritasOS/software-update-manager/blob/v1.0/README.md). +The update framework workflow details can be found in here: [ĀSUM update framework](https://stash.veritas.com/projects/AS/repos/platformx/browse/asum/docs/update.md). > NOTES diff --git a/sample/update/imgs/commit-precheck.svg b/sample/update/imgs/commit-precheck.svg new file mode 100644 index 0000000..ed535f4 --- /dev/null +++ b/sample/update/imgs/commit-precheck.svg @@ -0,0 +1,25 @@ + + + + + + +%3 + +cluster_0 + +commit-precheck plugins + + +version/version.commit-precheck + + +Check if backed up copy of appliance node version info exists... + + + + + diff --git a/sample/update/imgs/commit.svg b/sample/update/imgs/commit.svg new file mode 100644 index 0000000..a2120f8 --- /dev/null +++ b/sample/update/imgs/commit.svg @@ -0,0 +1,25 @@ + + + + + + +%3 + +cluster_0 + +commit plugins + + +version/version.commit + + +Restoring appliance node version... + + + + + diff --git a/sample/update/imgs/install.svg b/sample/update/imgs/install.svg new file mode 100644 index 0000000..e95921c --- /dev/null +++ b/sample/update/imgs/install.svg @@ -0,0 +1,25 @@ + + + + + + +%3 + +cluster_0 + +install plugins + + +version/version.install + + +Updating appliance version... + + + + + diff --git a/sample/update/imgs/postreboot.svg b/sample/update/imgs/postreboot.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/sample/update/imgs/postreboot.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/sample/update/imgs/preinstall.svg b/sample/update/imgs/preinstall.svg new file mode 100644 index 0000000..cf545bb --- /dev/null +++ b/sample/update/imgs/preinstall.svg @@ -0,0 +1,43 @@ + + + + + + +%3 + +cluster_0 + +preinstall plugins + + +version/save-for-rollback.preinstall + + +Saving current appliance node version info for rollback purposes... 
+ + + + +version/status.preinstall + + +Checking appliance update status... + + + + +version/status.preinstall->version/save-for-rollback.preinstall + + + + +version/status.preinstall->version/save-for-rollback.preinstall + + + + + diff --git a/sample/update/imgs/prereboot.svg b/sample/update/imgs/prereboot.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/sample/update/imgs/prereboot.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/sample/update/imgs/prerollback.svg b/sample/update/imgs/prerollback.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/sample/update/imgs/prerollback.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/sample/update/imgs/rollback-precheck.svg b/sample/update/imgs/rollback-precheck.svg new file mode 100644 index 0000000..c5768c8 --- /dev/null +++ b/sample/update/imgs/rollback-precheck.svg @@ -0,0 +1,25 @@ + + + + + + +%3 + +cluster_0 + +rollback-precheck plugins + + +version/version.rollback-precheck + + +Check if backed up copy of appliance node version info exists... + + + + + diff --git a/sample/update/imgs/rollback.svg b/sample/update/imgs/rollback.svg new file mode 100644 index 0000000..564401d --- /dev/null +++ b/sample/update/imgs/rollback.svg @@ -0,0 +1,25 @@ + + + + + + +%3 + +cluster_0 + +rollback plugins + + +version/version.rollback + + +Restoring appliance node version... + + + + + diff --git a/sdk/Makefile b/sdk/Makefile index 5b9d7fb..46624bb 100644 --- a/sdk/Makefile +++ b/sdk/Makefile @@ -105,7 +105,7 @@ generate: pluginTypes=$(PLUGIN_TYPES); \ for pt in $${pluginTypes}; do \ echo "Generating image for $${pt}..."; \ - $(TOP)/scripts/pm list -library $(PLUGINS_LIBRARY) -type $${pt} -log-dir $(PLUGINS_LIBRARY)/../imgs/ -log-file $${pt}; \ + $(TOP)/scripts/asum pm list -library $(PLUGINS_LIBRARY) -type $${pt} -log-dir $(PLUGINS_LIBRARY)/../imgs/ -log-file $${pt}; \ done; @echo ======== Keeping only plugin graph images and cleaning logs in image dir. 
diff --git a/sdk/template.md b/sdk/template.md index 170a4cc..5de928b 100644 --- a/sdk/template.md +++ b/sdk/template.md @@ -1,8 +1,8 @@ # Update plugins' and its dependencies -The document lists the plugins that are bundled into the update RPM using [ĀSUM SDK](https://stash.veritas.com/projects/AS/repos/platformx/browse/asum/sdk/README.md). +The document lists the plugins that are bundled into the update RPM using [SUM SDK](https://github.com/VeritasOS/software-update-manager/blob/v1/sdk/README.md). -The update framework workflow details can be found in here: [ĀSUM update framework](https://stash.veritas.com/projects/AS/repos/platformx/browse/asum/docs/update.md). +The update framework workflow details can be found in here: [SUM update framework](https://github.com/VeritasOS/software-update-manager/blob/v1/README.md). > NOTES From 56b7847a8434b4556fa2edfff738a581df854be0 Mon Sep 17 00:00:00 2001 From: "Abhijith D.A" Date: Sat, 1 May 2021 10:01:56 -0700 Subject: [PATCH 03/10] Include Sample updates and update sum usage in README. 
--- Makefile | 109 +++++++++++++++++- README.md | 56 +++++++++ sample/update/Makefile | 6 +- .../imgs/commit-precheck.svg | 13 +++ .../update-no-version-change/imgs/commit.svg | 13 +++ .../update-no-version-change/imgs/install.svg | 25 ++++ .../imgs/postreboot.svg | 13 +++ .../imgs/preinstall.svg | 13 +++ .../imgs/prereboot.svg | 13 +++ .../imgs/prerollback.svg | 13 +++ .../imgs/rollback-precheck.svg | 13 +++ .../imgs/rollback.svg | 13 +++ .../library/sum/copy-sum.yml | 61 ++++++++++ .../library/sum/sum.install | 3 + .../update-no-version-change/rpm-info.json | 20 ++++ .../imgs/commit-precheck.svg | 13 +++ samples/update-reboot-commit/imgs/commit.svg | 13 +++ samples/update-reboot-commit/imgs/install.svg | 33 ++++++ .../update-reboot-commit/imgs/postreboot.svg | 13 +++ .../update-reboot-commit/imgs/preinstall.svg | 25 ++++ .../update-reboot-commit/imgs/prereboot.svg | 43 +++++++ .../update-reboot-commit/imgs/prerollback.svg | 13 +++ .../imgs/rollback-precheck.svg | 13 +++ .../update-reboot-commit/imgs/rollback.svg | 13 +++ .../library/asum-reboot/reboot.install | 5 + .../asum-reboot/update_status.prereboot | 3 + .../asum-reboot/update_status_reboot.yml | 56 +++++++++ .../library/instances/online-check.preinstall | 2 + .../library/instances/online-check.yml | 104 +++++++++++++++++ .../library/node-version/node-version.install | 2 + .../library/node-version/set-node-version.yml | 16 +++ .../library/vcs/vcs-stop.sh | 22 ++++ .../library/vcs/vcs.prereboot | 4 + samples/update-reboot-commit/rpm-info.json | 43 +++++++ samples/update/imgs/commit-precheck.svg | 13 +++ samples/update/imgs/commit.svg | 13 +++ samples/update/imgs/install.svg | 25 ++++ samples/update/imgs/postreboot.svg | 13 +++ samples/update/imgs/preinstall.svg | 13 +++ samples/update/imgs/prereboot.svg | 13 +++ samples/update/imgs/prerollback.svg | 13 +++ samples/update/imgs/rollback-precheck.svg | 13 +++ samples/update/imgs/rollback.svg | 13 +++ .../library/node-version/node-version.install | 3 + 
.../library/node-version/set-node-version.yml | 16 +++ samples/update/rpm-info.json | 40 +++++++ 46 files changed, 1000 insertions(+), 8 deletions(-) create mode 100644 samples/update-no-version-change/imgs/commit-precheck.svg create mode 100644 samples/update-no-version-change/imgs/commit.svg create mode 100644 samples/update-no-version-change/imgs/install.svg create mode 100644 samples/update-no-version-change/imgs/postreboot.svg create mode 100644 samples/update-no-version-change/imgs/preinstall.svg create mode 100644 samples/update-no-version-change/imgs/prereboot.svg create mode 100644 samples/update-no-version-change/imgs/prerollback.svg create mode 100644 samples/update-no-version-change/imgs/rollback-precheck.svg create mode 100644 samples/update-no-version-change/imgs/rollback.svg create mode 100644 samples/update-no-version-change/library/sum/copy-sum.yml create mode 100644 samples/update-no-version-change/library/sum/sum.install create mode 100644 samples/update-no-version-change/rpm-info.json create mode 100644 samples/update-reboot-commit/imgs/commit-precheck.svg create mode 100644 samples/update-reboot-commit/imgs/commit.svg create mode 100644 samples/update-reboot-commit/imgs/install.svg create mode 100644 samples/update-reboot-commit/imgs/postreboot.svg create mode 100644 samples/update-reboot-commit/imgs/preinstall.svg create mode 100644 samples/update-reboot-commit/imgs/prereboot.svg create mode 100644 samples/update-reboot-commit/imgs/prerollback.svg create mode 100644 samples/update-reboot-commit/imgs/rollback-precheck.svg create mode 100644 samples/update-reboot-commit/imgs/rollback.svg create mode 100644 samples/update-reboot-commit/library/asum-reboot/reboot.install create mode 100644 samples/update-reboot-commit/library/asum-reboot/update_status.prereboot create mode 100644 samples/update-reboot-commit/library/asum-reboot/update_status_reboot.yml create mode 100644 samples/update-reboot-commit/library/instances/online-check.preinstall create 
mode 100644 samples/update-reboot-commit/library/instances/online-check.yml create mode 100644 samples/update-reboot-commit/library/node-version/node-version.install create mode 100644 samples/update-reboot-commit/library/node-version/set-node-version.yml create mode 100644 samples/update-reboot-commit/library/vcs/vcs-stop.sh create mode 100644 samples/update-reboot-commit/library/vcs/vcs.prereboot create mode 100644 samples/update-reboot-commit/rpm-info.json create mode 100644 samples/update/imgs/commit-precheck.svg create mode 100644 samples/update/imgs/commit.svg create mode 100644 samples/update/imgs/install.svg create mode 100644 samples/update/imgs/postreboot.svg create mode 100644 samples/update/imgs/preinstall.svg create mode 100644 samples/update/imgs/prereboot.svg create mode 100644 samples/update/imgs/prerollback.svg create mode 100644 samples/update/imgs/rollback-precheck.svg create mode 100644 samples/update/imgs/rollback.svg create mode 100644 samples/update/library/node-version/node-version.install create mode 100644 samples/update/library/node-version/set-node-version.yml create mode 100644 samples/update/rpm-info.json diff --git a/Makefile b/Makefile index d0e439b..d2a6baa 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ go-race: ## Run Go tests with race detector enabled .PHONY: .copy_update_binaries .copy_update_binaries: build - echo "Copying ASUM SDK related binaries..."; + echo "Copying SUM SDK related binaries..."; cp -prf $(GOBIN)/* $(SDK_SOURCE_PATH)/scripts/; \ if [ $$? 
-ne 0 ]; then \ echo "ERROR: $${f} failed to copy to $(SDK_SOURCE_PATH)/scripts/"; \ @@ -158,7 +158,7 @@ ship-sum-sdk: .copy_update_binaries fi; \ sum_sdk_tar="$${ship_dir}/$(SDK_NAME)-$(SDK_VERSION)-$(SDK_REVERSION)-$(BUILDDATE).tar.gz"; \ /bin/tar -czf $${sum_sdk_tar} -C $(SDK_SOURCE_PATH) .; \ - echo "Successfully created ASUM SDK at $${sum_sdk_tar}."; + echo "Successfully created SUM SDK at $${sum_sdk_tar}."; .PHONY: getsumsdk getsumsdk: @@ -177,8 +177,46 @@ getsumsdk: exit 1; \ fi; -.PHONY: sampleupdate -sampleupdate: getsumsdk +# TODO: Update below target to download from github. +.PHONY: get_remote_sum_sdk +get_remote_sum_sdk: + # Avoid downloading it again when it's already present. Run `get_latest_asum_sdk` to get latest. +ifeq ($(shell ls $(tmp_SUM_SDK)/.no-latest 2> /dev/null),) + $(ECHO) "===== Downloading ASUM RPM Generation utility (asum-sdk) $(tmp_SUM_SDK) ====="; + $(RM) -rf $(tmp_SUM_SDK); + $(MKDIR) -p $(tmp_SUM_SDK); + px_api_url="$(HTTP_ARTIFACTORY)/api/storage/release/platformx/main"; \ + px_ver=$$($(CURL) $${px_api_url} | jq -c '.children[] | select(.folder)' | sort -V | tail -2 | head -1 | jq .uri); \ + px_ver=$${px_ver:2:-1}; \ + px_buildtag=$${px_ver##*-}; \ + $(ECHO) "PlatformX latest version-buildtag: $${px_ver}"; \ + rpm_name=$$($(CURL) $(HTTP_ARTIFACTORY)/api/storage/release/platformx/main/$${px_ver}/ \ + | jq '.children[].uri' | grep asum-sdk); \ + rpm_name=$${rpm_name%\"}; \ + rpm_name=$${rpm_name#\"}; \ + $(ECHO) "RPM: $${rpm_name}"; \ + $(ECHO) "===== Downloading $${rpm_name} ====="; \ + asum_url="$(HTTP_ARTIFACTORY)/release/platformx/main/$${px_ver}/"; \ + $(WGET) $${asum_url}/$${rpm_name} -P $(tmp_SUM_SDK); \ + if [ $$? 
-ne 0 ] ; then \ + $(ECHO) "ERROR: $(WGET) $${rpm_name} failed to download."; \ + exit 1; \ + fi ; \ + $(ECHO) Successfully downloaded $${rpm_name} into $(tmp_SUM_SDK); + + echo "=============== Extracting asum-sdk =========================="; \ + $(TAR) -xzvf $(tmp_SUM_SDK)/asum-sdk-*.tar.gz -C $(tmp_SUM_SDK); \ + if [ $$? -ne 0 ]; then \ + echo "ERROR: Failed to extract asum-sdk to $(tmp_SUM_SDK)"; \ + exit 1; \ + fi; + $(ECHO) "To avoid downloading latest SUM SDK again and again on local " \ + "builds, you can touch $(tmp_SUM_SDK)/.no-latest file. " + # touch $(tmp_SUM_SDK)/.no-latest +endif + +.PHONY: sampleupdate_usinglocalsdk +sampleupdate_usinglocalsdk: getsumsdk echo "=============== Creating a sample update RPM ==============="; git checkout -- $(TOP)/sample/update/library/version/version.install $(TOP)/sample/update/rpm-info.json myVersion=$(PRODUCT_VERSION).9; \ @@ -191,12 +229,73 @@ sampleupdate: getsumsdk ship_dir=$(JENKINS_UPLOAD_DEST); \ fi; \ $(MAKE) -C $(SAMPLE_UPDATE) update \ - ASUM_SDK_PATH=$(tmp_SUM_SDK) \ + SUM_SDK_PATH=$(tmp_SUM_SDK) \ SHIP_DIR=$${ship_dir} \ UPDATE_VERSION=$${myVersion} \ $${JenkinsOptions}; git checkout -- $(TOP)/sample/update/library/version/version.install $(TOP)/sample/update/rpm-info.json + +.PHONY: sampleupdates +sampleupdates: sampleupdate_usinglocalsdk sampleupdate sampleupdate_reboot_commit sampleupdate_no_version_change + +.PHONY: sampleupdate +sampleupdate: get_remote_sum_sdk + $(ECHO) "======== Generating sample update RPM using SUM SDK..."; \ + git checkout -- samples/update/library/node-version/node-version.install + myVersion=$(VERSION).0.9; \ + sed -i -e "s%__VERSION__%$${myVersion}%g" $(TOP)/samples/update/library/node-version/node-version.install; \ + ship_dir=$(TOP); \ + if [ -n "$(JENKINS_URL)" ]; then \ + Options="RPM_RELEASE=$${BUILDTAG##*-}"; \ + ship_dir=$(TOP)/$(PRODUCT)/$(BRANCH)/$(BUILDTAG); \ + fi; \ + $(MAKE) -C $(tmp_SUM_SDK) generate \ + PLUGINS_LIBRARY=$(TOP)/samples/update/library/ \ + 
RPM_INFO_FILE=$(TOP)/samples/update/rpm-info.json \ + RPM_URL=https://github.com/VeritasOS/software-update-manager \ + RPM_VERSION=$${myVersion} \ + SHIP_DIR=$${ship_dir} \ + $${Options} || exit $$? + git checkout -- samples/update/library/node-version/node-version.install + +.PHONY: sampleupdate_reboot_commit +sampleupdate_reboot_commit: get_remote_sum_sdk + $(ECHO) "======== Generating sample update RPM requiring reboot using SUM SDK..."; + git checkout -- samples/update-reboot-commit/library/node-version/node-version.install + myVersion=$(VERSION).9; \ + sed -i -e "s%__VERSION__%$${myVersion}%g" $(TOP)/samples/update-reboot-commit/library/node-version/node-version.install; \ + ship_dir=$(TOP); \ + if [ -n "$(JENKINS_URL)" ]; then \ + Options="RPM_RELEASE=$${BUILDTAG##*-}"; \ + ship_dir=$(TOP)/$(PRODUCT)/$(BRANCH)/$(BUILDTAG); \ + fi; \ + $(MAKE) -C $(tmp_SUM_SDK) generate \ + PLUGINS_LIBRARY=$(TOP)/samples/update-reboot-commit/library/ \ + RPM_INFO_FILE=$(TOP)/samples/update-reboot-commit/rpm-info.json \ + RPM_URL=https://github.com/VeritasOS/software-update-manager \ + RPM_VERSION=$${myVersion} \ + SHIP_DIR=$${ship_dir} \ + $${Options} || exit $$? + git checkout -- samples/update-reboot-commit/library/node-version/node-version.install + +.PHONY:sampleupdate_no_version_change +sampleupdate_no_version_change: + $(ECHO) "======== Generating sample update RPM using SUM SDK..."; \ + myVersion=2.0.0.0.$${BUILDTAG##*-}; \ + ship_dir=$(tmp_SHIP_DIR); \ + if [ -n "$(JENKINS_URL)" ]; then \ + Options="RPM_RELEASE=$${BUILDTAG##*-}"; \ + ship_dir=$(TOP)/$(PRODUCT)/$(BRANCH)/$(BUILDTAG); \ + fi; \ + $(MAKE) -C $(tmp_SUM_SDK) generate \ + PLUGINS_LIBRARY=$(TOP)/samples/update-no-version-change/library/ \ + RPM_INFO_FILE=$(TOP)/samples/update-no-version-change/rpm-info.json \ + RPM_URL=https://github.com/VeritasOS/software-update-manager \ + RPM_VERSION=$${myVersion} \ + SHIP_DIR=$${ship_dir} \ + $${Options} || exit $$? 
+ .PHONY: clean-update clean-update: $(RM) -rf $(tmp_SUM_SDK) diff --git a/README.md b/README.md index 30be3e5..793e614 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,12 @@ The Software Update Manager (SUM) provides ability to update a system. It could - [Commit](#commit) - [Generating Update RPM](#generating-update-rpm) - [Sample Update](#sample-update) +- [Usage](#usage) + - [Add software to repository](#add-software-to-repository) + - [Install {{ software_type }} RPM](#install--software_type--rpm) + - [Commit {{ software_type }} RPM](#commit--software_type--rpm) + - [Rollback {{ software_type }} RPM](#rollback--software_type--rpm) + - [Delete software](#delete-software) @@ -100,3 +106,53 @@ For more details about SDK, refer to [SUM SDK](./sdk/README.md). A example usage of the framework can be found here: [sample-update](sample/update/Makefile). + +# Usage + +## Add software to repository + +```bash +$ ${sum_binary} repo add +-filepath={{ software_staging_area }}/{{ software_name }} +-repo={{ software_repo }}" +``` + +## Install {{ software_type }} RPM + +```bash +$ ${sum_binary} install +-filename={{ software_name }} +-type={{ software_type }} +-repo={{ software_repo }}" +``` + +## Commit {{ software_type }} RPM + +```bash +$ ${sum_binary} commit +-filename={{ software_name }} +-type={{ software_type }} +-repo={{ software_repo }} +-output-file={{ output_file }} +-output-format={{ output_format | default("yaml") }} +``` + +## Rollback {{ software_type }} RPM + +```bash +$ ${sum_binary} rollback +-filename={{ software_name }} +-type={{ software_type }} +-repo={{ software_repo }} +-output-file={{ output_file }} +-output-format={{ output_format | default("yaml") }} +``` + +## Delete software + +```bash + {{ asum_binary }} repo remove + -repo={{ software_repo }} + -type={{ software_type | default("") }} + -filename={{ software_name | default("") }} +``` diff --git a/sample/update/Makefile b/sample/update/Makefile index 20f1c1f..bc34b75 100644 --- 
a/sample/update/Makefile +++ b/sample/update/Makefile @@ -6,7 +6,7 @@ VERSION ?= 2.0 BRANCH ?= main TOP = $(shell pwd) -ASUM_SDK_PATH ?= $(TOP)/../../sdk/ +SUM_SDK_PATH ?= $(TOP)/../../sdk/ UPDATE_VERSION ?= $(VERSION).1 UPDATE_RELEASE ?= $(shell /usr/bin/date -u +%Y%m%d%H%M%S) @@ -21,7 +21,7 @@ all: update .PHONY: update update: @echo "======== Generating update RPM using ASUM SDK..."; \ - $(MAKE) -C $(ASUM_SDK_PATH) generate \ + $(MAKE) -C $(SUM_SDK_PATH) generate \ PLUGINS_LIBRARY=$(TOP)/library/ \ RPM_NAME=$(UPDATE_RPM_NAME) \ RPM_VERSION=$(UPDATE_VERSION) \ @@ -33,7 +33,7 @@ update: .PHONY: clean clean: - $(MAKE) -C $(ASUM_SDK_PATH) clean + $(MAKE) -C $(SUM_SDK_PATH) clean .PHONY: strip_watermark diff --git a/samples/update-no-version-change/imgs/commit-precheck.svg b/samples/update-no-version-change/imgs/commit-precheck.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-no-version-change/imgs/commit-precheck.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-no-version-change/imgs/commit.svg b/samples/update-no-version-change/imgs/commit.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-no-version-change/imgs/commit.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-no-version-change/imgs/install.svg b/samples/update-no-version-change/imgs/install.svg new file mode 100644 index 0000000..8290ce9 --- /dev/null +++ b/samples/update-no-version-change/imgs/install.svg @@ -0,0 +1,25 @@ + + + + + + +%3 + +cluster_0 + +install plugins + + +sum/sum.install + + +Install latest version of Software Update Manager + + + + + diff --git a/samples/update-no-version-change/imgs/postreboot.svg b/samples/update-no-version-change/imgs/postreboot.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-no-version-change/imgs/postreboot.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-no-version-change/imgs/preinstall.svg 
b/samples/update-no-version-change/imgs/preinstall.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-no-version-change/imgs/preinstall.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-no-version-change/imgs/prereboot.svg b/samples/update-no-version-change/imgs/prereboot.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-no-version-change/imgs/prereboot.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-no-version-change/imgs/prerollback.svg b/samples/update-no-version-change/imgs/prerollback.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-no-version-change/imgs/prerollback.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-no-version-change/imgs/rollback-precheck.svg b/samples/update-no-version-change/imgs/rollback-precheck.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-no-version-change/imgs/rollback-precheck.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-no-version-change/imgs/rollback.svg b/samples/update-no-version-change/imgs/rollback.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-no-version-change/imgs/rollback.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-no-version-change/library/sum/copy-sum.yml b/samples/update-no-version-change/library/sum/copy-sum.yml new file mode 100644 index 0000000..6de3fc0 --- /dev/null +++ b/samples/update-no-version-change/library/sum/copy-sum.yml @@ -0,0 +1,61 @@ +# Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +- name: "Install latest Software Update Manager." 
+ hosts: localhost + remote_user: root + gather_facts: yes + vars: + web_console_ip: "{{ ansible_local.config.cluster.web_console_ip }}" + token: "{{ lookup('file', '/mnt/data/token/host_agent_token.jwt') }}" + tasks: + - name: Retreive taskID + block: + - name: Get list of all the tasks + uri: + url: https://{{ web_console_ip }}//ms/v1/api/tasks + method: GET + validate_certs: no + headers: + Content-Type: "application/json" + x-auth-token: "{{ token }}" + register: tasks + + - name: Fetch taskID of the latest running update task + set_fact: + taskID: "{{ item.id }}" + when: + - item != "" + - item.state == "InProgress" + - item.type == "Update node" + with_items: "{{ tasks.json.tasks }}" + + - name: Update task status + block: + - name: Inform about action + inframodules: + operation: "update-task" + taskID: "{{ taskID }}" + StepID: "INSTALL" + TaskMessage: "Installing latest Software Update Manager." + TaskPercent: "60" + when: + - taskID is defined and taskID != "" + + - name: Copy a new "Software Update Manager (SUM)" file into place, backing up the original if it differs from the copied version + ansible.builtin.copy: + src: "{{ lookup('env', 'PM_LIBRARY') }}/../sum" + dest: /opt/veritas/appliance/bin/ + # owner: root + # group: root + mode: '0655' + backup: yes + + - name: Update task status - Update successful + inframodules: + operation: update-task + taskID: "{{ taskID }}" + StepID: "INSTALL" + StepStatus: "DONE" + TaskPercent: "100" + TaskMessage: | + Node was updated successfully. \ No newline at end of file diff --git a/samples/update-no-version-change/library/sum/sum.install b/samples/update-no-version-change/library/sum/sum.install new file mode 100644 index 0000000..b7eb216 --- /dev/null +++ b/samples/update-no-version-change/library/sum/sum.install @@ -0,0 +1,3 @@ +# Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 +Description=Install latest version of Software Update Manager +ExecStart=ansible-playbook ${PM_LIBRARY}/sum/copy-sum.yml \ No newline at end of file diff --git a/samples/update-no-version-change/rpm-info.json b/samples/update-no-version-change/rpm-info.json new file mode 100644 index 0000000..a9275ce --- /dev/null +++ b/samples/update-no-version-change/rpm-info.json @@ -0,0 +1,20 @@ +{ + "description": [ + "The update installs the latest version of Software Update Manager (SUM)." + ], + "type": "update", + "compatibility-info": [{ + "product-version": "2.*", + "install": { + "confirmation-message": [ + "Installing this update does not change the version.", + "", + "You can view the activity monitor for progress messages and the status of installation of this update.", + "" + ], + "requires-restart": false, + "supports-rollback": false, + "estimated-minutes": 5 + } + }] +} \ No newline at end of file diff --git a/samples/update-reboot-commit/imgs/commit-precheck.svg b/samples/update-reboot-commit/imgs/commit-precheck.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-reboot-commit/imgs/commit-precheck.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-reboot-commit/imgs/commit.svg b/samples/update-reboot-commit/imgs/commit.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-reboot-commit/imgs/commit.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-reboot-commit/imgs/install.svg b/samples/update-reboot-commit/imgs/install.svg new file mode 100644 index 0000000..570d749 --- /dev/null +++ b/samples/update-reboot-commit/imgs/install.svg @@ -0,0 +1,33 @@ + + + + + + +%3 + +cluster_0 + +install plugins + + +asum-reboot/reboot.install + + +Preparing for reboot + + + + +node-version/node-version.install + + +Update node version for Flex appliance + + + + + diff --git a/samples/update-reboot-commit/imgs/postreboot.svg 
b/samples/update-reboot-commit/imgs/postreboot.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-reboot-commit/imgs/postreboot.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-reboot-commit/imgs/preinstall.svg b/samples/update-reboot-commit/imgs/preinstall.svg new file mode 100644 index 0000000..c5c64ae --- /dev/null +++ b/samples/update-reboot-commit/imgs/preinstall.svg @@ -0,0 +1,25 @@ + + + + + + +%3 + +cluster_0 + +preinstall plugins + + +instances/online-check.preinstall + + +Checking for in-progress operations and status of application instances + + + + + diff --git a/samples/update-reboot-commit/imgs/prereboot.svg b/samples/update-reboot-commit/imgs/prereboot.svg new file mode 100644 index 0000000..1e1018e --- /dev/null +++ b/samples/update-reboot-commit/imgs/prereboot.svg @@ -0,0 +1,43 @@ + + + + + + +%3 + +cluster_0 + +prereboot plugins + + +asum-reboot/update_status.prereboot + + +Updating task status - system restart + + + + +vcs/vcs.prereboot + + +Stopping VERITAS Cluster Server (VCS) service... 
+ + + + +asum-reboot/update_status.prereboot->vcs/vcs.prereboot + + + + +asum-reboot/update_status.prereboot->vcs/vcs.prereboot + + + + + diff --git a/samples/update-reboot-commit/imgs/prerollback.svg b/samples/update-reboot-commit/imgs/prerollback.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-reboot-commit/imgs/prerollback.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-reboot-commit/imgs/rollback-precheck.svg b/samples/update-reboot-commit/imgs/rollback-precheck.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-reboot-commit/imgs/rollback-precheck.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-reboot-commit/imgs/rollback.svg b/samples/update-reboot-commit/imgs/rollback.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update-reboot-commit/imgs/rollback.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update-reboot-commit/library/asum-reboot/reboot.install b/samples/update-reboot-commit/library/asum-reboot/reboot.install new file mode 100644 index 0000000..efd9676 --- /dev/null +++ b/samples/update-reboot-commit/library/asum-reboot/reboot.install @@ -0,0 +1,5 @@ +# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +Description=Preparing for reboot +# NOTE: Call `asum reboot` as the last step in `install` step to automatically +# invoke (pre)reboot actions. +ExecStart=${PM_LIBRARY}/../asum reboot diff --git a/samples/update-reboot-commit/library/asum-reboot/update_status.prereboot b/samples/update-reboot-commit/library/asum-reboot/update_status.prereboot new file mode 100644 index 0000000..d3ce540 --- /dev/null +++ b/samples/update-reboot-commit/library/asum-reboot/update_status.prereboot @@ -0,0 +1,3 @@ +# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 +Description=Updating task status - system restart +ExecStart=/bin/ansible-playbook ${PM_LIBRARY}/asum-reboot/update_status_reboot.yml diff --git a/samples/update-reboot-commit/library/asum-reboot/update_status_reboot.yml b/samples/update-reboot-commit/library/asum-reboot/update_status_reboot.yml new file mode 100644 index 0000000..293cac4 --- /dev/null +++ b/samples/update-reboot-commit/library/asum-reboot/update_status_reboot.yml @@ -0,0 +1,56 @@ +# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +- name: Updating task status - system restart + hosts: localhost + remote_user: root + gather_facts: yes + vars: + web_console_ip: "{{ ansible_local.config.cluster.web_console_ip }}" + token: "{{ lookup('file', '/mnt/data/token/host_agent_token.jwt') }}" + tasks: + - name: Get hostname + command: hostname -s + register: shortHostname + + - name: Retreive taskID + block: + - name: Get list of all the tasks + uri: + url: https://{{ web_console_ip }}//ms/v1/api/tasks + method: GET + validate_certs: no + headers: + Content-Type: "application/json" + x-auth-token: "{{ token }}" + register: tasks + + - name: Fetch taskID of the latest running update task + set_fact: + taskID: "{{ item.id }}" + when: + - item != "" + - item.state == "InProgress" + - item.type == "Update appliance and reboot" + with_items: "{{ tasks.json.tasks }}" + + - name: Update task status + block: + - name: Update the step POST-INSTALL as DONE + inframodules: + operation: "update-task" + taskID: "{{ taskID }}" + StepID: "POST-INSTALL" + StepStatus: "DONE" + TaskMessage: "" + TaskPercent: "60" + + - name: Update the step RESTART_NODE step as START + inframodules: + operation: "update-task" + taskID: "{{ taskID }}" + StepID: "RESTART_NODE" + StepStatus: "START" + TaskMessage: "Restarting {{ shortHostname.stdout }}. The restart can take up to 30 minutes. Do not make any configuration changes during this time. 
Note: An “Unable to contact server” error message may display while the node is restarting. This message does not indicate a failure and can be ignored. You may also be signed out of the Flex Appliance Console. If you are signed out, sign back in after the node has restarted. You can monitor the restart progress from the Veritas Remote Management Interface." + TaskPercent: "60" + when: + - taskID is defined and taskID != "" diff --git a/samples/update-reboot-commit/library/instances/online-check.preinstall b/samples/update-reboot-commit/library/instances/online-check.preinstall new file mode 100644 index 0000000..918d6e9 --- /dev/null +++ b/samples/update-reboot-commit/library/instances/online-check.preinstall @@ -0,0 +1,2 @@ +Description=Checking for in-progress operations and status of application instances +ExecStart=ansible-playbook ${PM_LIBRARY}/instances/online-check.yml diff --git a/samples/update-reboot-commit/library/instances/online-check.yml b/samples/update-reboot-commit/library/instances/online-check.yml new file mode 100644 index 0000000..08210b5 --- /dev/null +++ b/samples/update-reboot-commit/library/instances/online-check.yml @@ -0,0 +1,104 @@ +# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 + +- name: Update precheck + hosts: localhost + remote_user: root + gather_facts: yes + vars: + web_console_ip: "{{ ansible_local.config.cluster.web_console_ip }}" + token: "{{ lookup('file', '/mnt/data/token/host_agent_token.jwt') }}" + tasks: + + - set_fact: detailedErrMsg="" + + - uri: + url: https://{{ web_console_ip }}//ms/v1/api/tasks + method: GET + validate_certs: no + headers: + Content-Type: "application/json" + x-auth-token: "{{ token }}" + register: tasks + + - name: Fetch taskID of the latest running update task + set_fact: + taskID: "{{ item.id }}" + when: + - item != "" + - item.state == "InProgress" + - item.type == "Update node" + with_items: "{{ tasks.json.tasks }}" + + - name: Check if there is a non-patch-update task that is currently in progress + set_fact: + detailedErrMsg: "Appliance update precheck failed. At least one operation is in progress on the appliance. Wait for all current operations to complete and try again." + when: + - item != "" + - item.state == "InProgress" + - item.type != "Update node" + with_items: "{{ tasks.json.tasks }}" + + - name: Update task status - Precheck failed as there is a task in progress + inframodules: + operation: update-task + taskID: "{{ taskID }}" + StepID: "INSTALL" + StepStatus: "FAILED" + TaskMessage: "{{ detailedErrMsg }}" + when: detailedErrMsg != '' + + - fail: msg="Precheck failed as there is a task In progress." + when: detailedErrMsg != '' + + - uri: + url: https://{{ web_console_ip }}//ms/v1/api/instances + method: GET + validate_certs: no + headers: + Content-Type: "application/json" + x-auth-token: "{{ token }}" + register: instances + + - name: Check if there is a pending instance upgrade + set_fact: + detailedErrMsg: "Appliance update precheck failed. At least one instance upgrade is in progress or pending. Complete all in-progress instance upgrades from the Flex Appliance Console, then try again." 
+ when: item.upgradeState.status != "" + with_items: "{{ instances.json }}" + + - name: Update task status - Precheck failed as there is a task in progress + inframodules: + operation: update-task + taskID: "{{ taskID }}" + StepID: "INSTALL" + StepStatus: "FAILED" + TaskMessage: "{{ detailedErrMsg }}" + when: detailedErrMsg != '' + + - fail: msg="Precheck failed as there is a pending instance upgrade." + when: detailedErrMsg != '' + + - name: Get hostname + command: hostname + register: hname + + - name: Check if there is an online instance on the node + set_fact: + detailedErrMsg: "Appliance update precheck failed. At least one instance is not offline on the appliance. Stop the instance from the Flex Appliance Console, then try again." + when: + - item[1].status != "OFFLINE" + - item[1].node == hname.stdout + with_subelements: + - "{{ instances.json }}" + - state + + - name: Update task status - Precheck failed as there is instance online on the node + inframodules: + operation: update-task + taskID: "{{ taskID }}" + StepID: "INSTALL" + StepStatus: "FAILED" + TaskMessage: "{{ detailedErrMsg }}" + when: detailedErrMsg != '' + + - fail: msg="Precheck failed as there is instance online on the node." 
+ when: detailedErrMsg != '' diff --git a/samples/update-reboot-commit/library/node-version/node-version.install b/samples/update-reboot-commit/library/node-version/node-version.install new file mode 100644 index 0000000..2fc2615 --- /dev/null +++ b/samples/update-reboot-commit/library/node-version/node-version.install @@ -0,0 +1,2 @@ +Description=Update node version for Flex appliance +ExecStart=ansible-playbook ${PM_LIBRARY}/node-version/set-node-version.yml -e version=__VERSION__ \ No newline at end of file diff --git a/samples/update-reboot-commit/library/node-version/set-node-version.yml b/samples/update-reboot-commit/library/node-version/set-node-version.yml new file mode 100644 index 0000000..d7668eb --- /dev/null +++ b/samples/update-reboot-commit/library/node-version/set-node-version.yml @@ -0,0 +1,16 @@ +# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +--- +- hosts: localhost + remote_user: root + gather_facts: yes + + tasks: + - name: Update version in /etc/vxos-release fields + shell: "/opt/veritas/appliance/bin/set_vxosrel_info.sh -v '{{ version }}'" + + - name: Update DB + centralconfig: + operation: update + namespace: "platform_config" + params: "[{ \"key\": \"{{ ansible_machine_id }}.product_version\", \"value\": \"{{ version }}\"}]" \ No newline at end of file diff --git a/samples/update-reboot-commit/library/vcs/vcs-stop.sh b/samples/update-reboot-commit/library/vcs/vcs-stop.sh new file mode 100644 index 0000000..da7ec7c --- /dev/null +++ b/samples/update-reboot-commit/library/vcs/vcs-stop.sh @@ -0,0 +1,22 @@ +#!/bin/sh +# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +scriptStatus=0 + +echo "Pausing 10 sec for updating task messages in WebUI ..." 
+sleep 10 + +echo "Stopping VERITAS Cluster Server (VCS) service..."; +/bin/systemctl stop vcs; +status=$?; +if [ ${status} -ne 0 ]; then + echo "Failed to stop VERITAS Cluster Server (VCS) service."; + scriptStatus=1 +fi +echo "Successfully stopped VERITAS Cluster Server (VCS) service."; + +# Run VERITAS Cluster Server (VCS) status to log output for debugging purposes. +echo "Display VERITAS Cluster Server (VCS) status..."; +/bin/systemctl status vcs; + +exit ${scriptStatus}; diff --git a/samples/update-reboot-commit/library/vcs/vcs.prereboot b/samples/update-reboot-commit/library/vcs/vcs.prereboot new file mode 100644 index 0000000..eb0e6d2 --- /dev/null +++ b/samples/update-reboot-commit/library/vcs/vcs.prereboot @@ -0,0 +1,4 @@ +# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +Description=Stopping VERITAS Cluster Server (VCS) service... +ExecStart=/bin/sh ${PM_LIBRARY}/vcs/vcs-stop.sh +Requires=asum-reboot/update_status.prereboot diff --git a/samples/update-reboot-commit/rpm-info.json b/samples/update-reboot-commit/rpm-info.json new file mode 100644 index 0000000..f4a3814 --- /dev/null +++ b/samples/update-reboot-commit/rpm-info.json @@ -0,0 +1,43 @@ +{ + "description": [ + "This update RPM, updates the version of the node and reboots.", + "", + "Prerequisite: Any online application instances should be stopped first. Otherwise, update fails asking one to stop instances.", + "" + ], + "type": "update", + "compatibility-info": [ + { + "product-version": "2.0.0.*", + "install": { + "confirmation-message": [ + "Any online application instances should be stopped first.", + "", + "After update is installed, the node will be restarted, and the Web console may not be accessible for up to 10 mins.", + "You can monitor the progress of the restart via IPMI console." 
+ ], + "requires-restart": true, + "supports-rollback": false, + "estimated-minutes": 25 + }, + "rollback": { + "confirmation-message": [ + "Sample multi-line confirmation message", + "", + "Display info messages and instructions like roll back requires restarting node, ", + "(even though install didn't require restart), as snapshot needs to be reverted." + ], + "requires-restart": true, + "estimated-minutes": 40 + }, + "commit": { + "confirmation-message": [ + "Sample multi-line confirmation message", + "", + "Display info messages and instructions like once update is committed, you cannot roll back." + ], + "estimated-minutes": 5 + } + } + ] +} \ No newline at end of file diff --git a/samples/update/imgs/commit-precheck.svg b/samples/update/imgs/commit-precheck.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update/imgs/commit-precheck.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update/imgs/commit.svg b/samples/update/imgs/commit.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update/imgs/commit.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update/imgs/install.svg b/samples/update/imgs/install.svg new file mode 100644 index 0000000..b7a0579 --- /dev/null +++ b/samples/update/imgs/install.svg @@ -0,0 +1,25 @@ + + + + + + +%3 + +cluster_0 + +install plugins + + +node-version/node-version.install + + +Update node version for Flex appliance + + + + + diff --git a/samples/update/imgs/postreboot.svg b/samples/update/imgs/postreboot.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update/imgs/postreboot.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update/imgs/preinstall.svg b/samples/update/imgs/preinstall.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update/imgs/preinstall.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update/imgs/prereboot.svg b/samples/update/imgs/prereboot.svg new 
file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update/imgs/prereboot.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update/imgs/prerollback.svg b/samples/update/imgs/prerollback.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update/imgs/prerollback.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update/imgs/rollback-precheck.svg b/samples/update/imgs/rollback-precheck.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update/imgs/rollback-precheck.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update/imgs/rollback.svg b/samples/update/imgs/rollback.svg new file mode 100644 index 0000000..9ed2e10 --- /dev/null +++ b/samples/update/imgs/rollback.svg @@ -0,0 +1,13 @@ + + + + + + +%3 + + + diff --git a/samples/update/library/node-version/node-version.install b/samples/update/library/node-version/node-version.install new file mode 100644 index 0000000..eb458f9 --- /dev/null +++ b/samples/update/library/node-version/node-version.install @@ -0,0 +1,3 @@ +# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +Description=Update node version for Flex appliance +ExecStart=ansible-playbook ${PM_LIBRARY}/node-version/set-node-version.yml -e version=__VERSION__ \ No newline at end of file diff --git a/samples/update/library/node-version/set-node-version.yml b/samples/update/library/node-version/set-node-version.yml new file mode 100644 index 0000000..d7668eb --- /dev/null +++ b/samples/update/library/node-version/set-node-version.yml @@ -0,0 +1,16 @@ +# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 + +--- +- hosts: localhost + remote_user: root + gather_facts: yes + + tasks: + - name: Update version in /etc/vxos-release fields + shell: "/opt/veritas/appliance/bin/set_vxosrel_info.sh -v '{{ version }}'" + + - name: Update DB + centralconfig: + operation: update + namespace: "platform_config" + params: "[{ \"key\": \"{{ ansible_machine_id }}.product_version\", \"value\": \"{{ version }}\"}]" \ No newline at end of file diff --git a/samples/update/rpm-info.json b/samples/update/rpm-info.json new file mode 100644 index 0000000..7cb84ad --- /dev/null +++ b/samples/update/rpm-info.json @@ -0,0 +1,40 @@ +{ + "description": [ + "Sample multi-line description", + "Consumers/GUI can display each item in this list in a separate para with appropriate linebreaks." + ], + "type": "update", + "compatibility-info": [ + { + "product-version": "2.0", + "install": { + "confirmation-message": [ + "Sample multi-line confirmation message", + "", + "Display warning messages and instructions like stopping instances." + ], + "requires-restart": false, + "supports-rollback": false, + "estimated-minutes": 25 + }, + "rollback": { + "confirmation-message": [ + "Sample multi-line confirmation message", + "", + "Display info messages and instructions like roll back requires restarting node, ", + "(even though install didn't require restart), as snapshot needs to be reverted." + ], + "requires-restart": true, + "estimated-minutes": 40 + }, + "commit": { + "confirmation-message": [ + "Sample multi-line confirmation message", + "", + "Display info messages and instructions like once update is committed, you cannot roll back." + ], + "estimated-minutes": 5 + } + } + ] +} \ No newline at end of file From c1c4271979ab288cc0907c9cf933cd7c64dec000 Mon Sep 17 00:00:00 2001 From: "Abhijith D.A" Date: Wed, 19 May 2021 21:15:02 -0700 Subject: [PATCH 04/10] Fix README path. 
--- sample/update/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sample/update/README.md b/sample/update/README.md index fe13bdf..c06850c 100644 --- a/sample/update/README.md +++ b/sample/update/README.md @@ -1,8 +1,8 @@ # Update plugins' and its dependencies -The document lists the plugins that are bundled into the update RPM using [ĀSUM SDK](https://stash.veritas.com/projects/AS/repos/platformx/browse/asum/sdk/README.md). +The document lists the plugins that are bundled into the update RPM using [ĀSUM SDK](../../sdk/README.md). -The update framework workflow details can be found in here: [ĀSUM update framework](https://stash.veritas.com/projects/AS/repos/platformx/browse/asum/docs/update.md). +The update framework workflow details can be found in here: [ĀSUM update framework](../../README.md). > NOTES From 3c8adad4e747633d31f24d36702f3ab865f428a1 Mon Sep 17 00:00:00 2001 From: "Abhijith D.A" Date: Sun, 6 Jun 2021 14:52:37 -0700 Subject: [PATCH 05/10] Update sum command usage. --- README.md | 122 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 69 insertions(+), 53 deletions(-) diff --git a/README.md b/README.md index 793e614..61b7bb2 100644 --- a/README.md +++ b/README.md @@ -9,25 +9,26 @@ The Software Update Manager (SUM) provides ability to update a system. 
It could - [Software Update Manager (SUM)](#software-update-manager-sum) -- [Architecture](#architecture) -- [Workflow](#workflow) - - [Plugins](#plugins) - - [Install](#install) - - [Reboot](#reboot) - - [Rollback](#rollback) - - [Commit](#commit) -- [Generating Update RPM](#generating-update-rpm) -- [Sample Update](#sample-update) -- [Usage](#usage) - - [Add software to repository](#add-software-to-repository) - - [Install {{ software_type }} RPM](#install--software_type--rpm) - - [Commit {{ software_type }} RPM](#commit--software_type--rpm) - - [Rollback {{ software_type }} RPM](#rollback--software_type--rpm) - - [Delete software](#delete-software) + - [Architecture](#architecture) + - [Workflow](#workflow) + - [Plugins](#plugins) + - [Install](#install) + - [Reboot](#reboot) + - [Rollback](#rollback) + - [Commit](#commit) + - [Generating Update RPM](#generating-update-rpm) + - [Sample Update](#sample-update) + - [Usage](#usage) + - [Add software to repository](#add-software-to-repository) + - [List software](#list-software) + - [Delete software](#delete-software) + - [Install ${software_type} RPM](#install-software_type-rpm) + - [Commit ${software_type} RPM](#commit-software_type-rpm) + - [Rollback ${software_type} RPM](#rollback-software_type-rpm) -# Architecture +## Architecture At a high-level, the SUM architecture is as shown below. @@ -42,13 +43,13 @@ At a high-level, the SUM architecture is as shown below. > NOTE: SUM is designed to update system locally. The rolling updates in case of cluster, should be handled by the orchestration layer. -# Workflow +## Workflow This section details out the design of the SUM Update Framework. ![SUM Update Framework](./docs/imgs/Update_Framework.svg) -## Plugins +### Plugins The SUM Framework uses [Plugin Manager (PM)](./pm.md) to perform several of it's actions, and defines the types of plugins to perform following user actions: install, reboot, rollback and commit in the below sections. 
The framework also defines certain variables to enable plugins to access following paths: @@ -58,14 +59,14 @@ The framework also defines certain variables to enable plugins to access followi | `${PM_LIBRARY}` | Plugins library path. | | `${VXAPP_UPGRADE_ROOT}` | Alternate root volume path for doing offline updates. | -### Install +#### Install | Types | File extensions | Examples | | --- | --- | --- | | Pre-install update actions | `.preinstall` | - Bind mount required file systems inside update volume before installing RPM/ISO, to share configuration that could be used during install step. One could have say `mountfs.preinstall` plugin to perform the same action.
- Check whether application is stopped. | | Install update actions | `.install` | VUF install could be used with appropriate arguments to perform either live or offline update. | -### Reboot +#### Reboot | Types | File extensions | Examples | | --- | --- | --- | @@ -74,7 +75,7 @@ The framework also defines certain variables to enable plugins to access followi > **NOTE:** If an action can be performed before the reboot, then it is recommended to do it in the `.prereboot` plugin rather than a `.postreboot` plugin, so that if there are any failures, it can be caught before reboot which helps in avoiding downtime for customers. -### Rollback +#### Rollback | Types | File extensions | Examples | | --- | --- | --- | @@ -82,7 +83,7 @@ The framework also defines certain variables to enable plugins to access followi | Pre rollback actions | `.prerollback` | - Set Grub to boot to old version in case of using offline update approach.
- Revert snapshot. | | Rollback actions | `.rollback` | - Remove new version container images. | -### Commit +#### Commit | Types | File extensions | Examples | | --- | --- | --- | @@ -96,63 +97,78 @@ The framework also defines certain variables to enable plugins to access followi The update plugins must be deployed into a plugin folder under plugins library path i.e., `${PM_LIBRARY}//`. To access this path in plugins, one must use environment variable `${PM_LIBRARY}` to access the plugins library location. -# Generating Update RPM +## Generating Update RPM An update RPM should be SUM format compliant in order for one to successfully install the update. In order to generate such an RPM, one should use SUM SDK. For more details about SDK, refer to [SUM SDK](./sdk/README.md). -# Sample Update +## Sample Update A example usage of the framework can be found here: [sample-update](sample/update/Makefile). -# Usage +## Usage -## Add software to repository +The following sub-sections detail out the software update manager command tool usage. 
The default values of some of the optional parameters are as follows: + +- `repo`: `"/system/software/repository/"` +- `output_format`: `"yaml"` + +### Add software to repository ```bash -$ ${sum_binary} repo add --filepath={{ software_staging_area }}/{{ software_name }} --repo={{ software_repo }}" +$ ${sum_binary} repo add -filepath=${software_staging_area}/${software_name} +[ -repo=${software_repo} ] ``` -## Install {{ software_type }} RPM +### List software ```bash -$ ${sum_binary} install --filename={{ software_name }} --type={{ software_type }} --repo={{ software_repo }}" +$ ${sum_binary} repo list +[ -repo=${software_repo} ] +[ -type=${software_type} ] +[ -filename=${software_name} ] ``` -## Commit {{ software_type }} RPM +### Delete software ```bash -$ ${sum_binary} commit --filename={{ software_name }} --type={{ software_type }} --repo={{ software_repo }} --output-file={{ output_file }} --output-format={{ output_format | default("yaml") }} +$ ${sum_binary} repo remove +-type=${software_type} +-filename=${software_name} +[ -repo=${software_repo} ] +[ -output-file=${output_file} ] +[ -output-format=${output_format} ] ``` -## Rollback {{ software_type }} RPM +### Install ${software_type} RPM ```bash -$ ${sum_binary} rollback --filename={{ software_name }} --type={{ software_type }} --repo={{ software_repo }} --output-file={{ output_file }} --output-format={{ output_format | default("yaml") }} +$ ${sum_binary} install +-filename=${software_name} +-type=${software_type} +[ -repo=${software_repo} ] ``` -## Delete software +### Commit ${software_type} RPM ```bash - {{ asum_binary }} repo remove - -repo={{ software_repo }} - -type={{ software_type | default("") }} - -filename={{ software_name | default("") }} +$ ${sum_binary} commit +-filename=${software_name} +-type=${software_type} +[ -repo=${software_repo} ] +[ -output-file=${output_file} ] +[ -output-format=${output_format} ] +``` + +### Rollback ${software_type} RPM + +```bash +$ ${sum_binary} rollback 
+-filename=${software_name} +-type=${software_type} +[ -repo=${software_repo} ] +[ -output-file=${output_file} ] +[ -output-format=${output_format} ] ``` From 1f0adc2ef3b9ca1adedf5baf4efd54a3d3e9dc1a Mon Sep 17 00:00:00 2001 From: "Abhijith D.A" Date: Sun, 6 Jun 2021 19:47:21 -0700 Subject: [PATCH 06/10] Update plugin manager version --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 36baf12..562a351 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,6 @@ go 1.12 // replace plugin-manager => ../plugin-manager/ require ( - github.com/VeritasOS/plugin-manager v1.0.0 + github.com/VeritasOS/plugin-manager v1.0.1 gopkg.in/yaml.v2 v2.4.0 ) From 35753a2394aa109205c7c31025b4350720969875 Mon Sep 17 00:00:00 2001 From: "Abhijith D.A" Date: Sun, 6 Jun 2021 20:01:24 -0700 Subject: [PATCH 07/10] Update branch to run --- .github/workflows/go.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 0b9ce2a..ff6ce7c 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -2,9 +2,9 @@ name: Go on: push: - branches: [ main v1] + branches: [ v1] pull_request: - branches: [ main v1] + branches: [ v1] jobs: From 88a8bd5a13c3fd2b025a1716b64d9169c16a8633 Mon Sep 17 00:00:00 2001 From: "Abhijith D.A" Date: Sun, 6 Jun 2021 20:07:34 -0700 Subject: [PATCH 08/10] Update status badge for v1 branch (#7) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 61b7bb2..c1a61cb 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Software Update Manager (SUM) -[![Go](https://github.com/VeritasOS/software-update-manager/actions/workflows/go.yml/badge.svg)](https://github.com/VeritasOS/software-update-manager/actions/workflows/go.yml) 
+[![Go](https://github.com/VeritasOS/software-update-manager/actions/workflows/go.yml/badge.svg?branch=v1)](https://github.com/VeritasOS/software-update-manager/actions/workflows/go.yml) The Software Update Manager (SUM) provides ability to update a system. It could be used to perform major or minor updates. The main difference of major and minor updates is whether the update RPM contains the entire ISO contents or not. The SUM framework can be used to perform live update or an offline update (i.e., utilitizing a separate partition). It relies on [Plugin Manager](https://"github.com/VeritasOS/plugin-manager) & Plugins to drive or replay any required configuration. The SUM architecture & workflow can be found in the following sections. From 46aaefc4c8c85c7f9c7004faec133e279cd71c4b Mon Sep 17 00:00:00 2001 From: "Abhijith D.A" Date: Sat, 26 Jun 2021 13:12:00 -0700 Subject: [PATCH 09/10] Rename asum to sum (#6) * Rename asum.go to sum.go * Update ASUM to sum. * Update format --- Makefile | 2 +- cmd/{asum/asum.go => sum/sum.go} | 8 ++-- repo/list.go | 4 +- repo/repo.go | 2 +- sample/update/Makefile | 4 +- .../library/asum-reboot/reboot.install | 6 +-- sdk/.gitignore | 3 +- sdk/Makefile | 26 ++++++------ sdk/README.md | 11 ++--- sdk/asumrpm.spec | 2 +- sdk/scripts/commit | 2 +- sdk/scripts/install | 2 +- sdk/scripts/reboot | 2 +- sdk/scripts/rollback | 4 +- update/update.go | 8 ++-- utils/rpm/rpm.go | 2 +- utils/rpm/rpm_test.go | 40 +++++++++---------- 17 files changed, 64 insertions(+), 64 deletions(-) rename cmd/{asum/asum.go => sum/sum.go} (95%) diff --git a/Makefile b/Makefile index d2a6baa..15253ad 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ GOTOOLSBIN=$(TOP)/tools/go/ # SUM SDK related variables SDK_NAME = sum-sdk -# Keep the SDK_VERSION same as ASUM_RPM_FORMAT_VERSION. +# Keep the SDK_VERSION same as RPM_FORMAT_VERSION. 
SDK_VERSION = 2.0 SDK_REVERSION = 1 SDK_SOURCE_PATH = $(TOP)/sdk diff --git a/cmd/asum/asum.go b/cmd/sum/sum.go similarity index 95% rename from cmd/asum/asum.go rename to cmd/sum/sum.go index 6543479..05d24ba 100644 --- a/cmd/asum/asum.go +++ b/cmd/sum/sum.go @@ -21,7 +21,7 @@ var ( // progname is name of my binary/program/executable. progname = filepath.Base(os.Args[0]) // version of my program. - version = "5.8" + version = "5.9" ) func mainRegisterCmdOptions() { @@ -35,7 +35,7 @@ var mainCmdOptions struct { } func init() { - config.SetLogDir("/var/log/asum/") + config.SetLogDir("/var/log/sum/") } func main() { @@ -118,7 +118,7 @@ func usage(progname, subcmd string) { switch subcmd { case "": const usageStr = ` -SUM (PROGNAME) is a tool for Software Updates Management (ASUM). +SUM (PROGNAME) is a tool for Software Updates Management (SUM). Usage: @@ -132,7 +132,7 @@ The commands are: reboot reboots/restarts the node running reboots specific action for installing software update. repo perform Software Repository management operations. rollback rolls back the installed software update. - version print Software Updates Management (ASUM) version. + version print Software Updates Management (SUM) version. Use "PROGNAME help [command]" for more information about a command. diff --git a/repo/list.go b/repo/list.go index 08280c3..ae682f7 100644 --- a/repo/list.go +++ b/repo/list.go @@ -21,9 +21,9 @@ import ( "gopkg.in/yaml.v2" ) -// FormatVersionName is the ASUM RPM format version string that's embedded +// FormatVersionName is the RPM format version string that's embedded // into RPM used for identifying the JSON format version. 
-const FormatVersionName = "ASUM RPM Format Version" +const FormatVersionName = "RPM Format Version" // RPMInfo is the list of RPM package info type RPMInfo interface { diff --git a/repo/repo.go b/repo/repo.go index f4cba6c..123db83 100644 --- a/repo/repo.go +++ b/repo/repo.go @@ -84,7 +84,7 @@ func registerCommandVersion(progname string) { // Input: // 1. map[string]interface{} // where, the options could be following: -// "progname": Name of the program along with any cmds (ex: asum pm) +// "progname": Name of the program along with any cmds (ex: sum pm) // "cmd-index": Index to the cmd (ex: run) func ScanCommandOptions(options map[string]interface{}) error { log.Printf("Entering ScanCommandOptions(%+v)...", options) diff --git a/sample/update/Makefile b/sample/update/Makefile index bc34b75..0d646be 100644 --- a/sample/update/Makefile +++ b/sample/update/Makefile @@ -20,14 +20,14 @@ all: update .PHONY: update update: - @echo "======== Generating update RPM using ASUM SDK..."; \ + @echo "======== Generating update RPM using SUM SDK..."; \ $(MAKE) -C $(SUM_SDK_PATH) generate \ PLUGINS_LIBRARY=$(TOP)/library/ \ RPM_NAME=$(UPDATE_RPM_NAME) \ RPM_VERSION=$(UPDATE_VERSION) \ RPM_RELEASE=$(UPDATE_RELEASE) \ RPM_URL=$(UPDATE_URL) \ - RPM_SUMMARY="Sample Update RPM created using ASUM SDK" \ + RPM_SUMMARY="Sample Update RPM created using SUM SDK" \ RPM_INFO_FILE=$(UPDATE_RPM_INFO_FILE) || exit $$? diff --git a/samples/update-reboot-commit/library/asum-reboot/reboot.install b/samples/update-reboot-commit/library/asum-reboot/reboot.install index efd9676..47074e1 100644 --- a/samples/update-reboot-commit/library/asum-reboot/reboot.install +++ b/samples/update-reboot-commit/library/asum-reboot/reboot.install @@ -1,5 +1,5 @@ -# Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +# Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 Description=Preparing for reboot -# NOTE: Call `asum reboot` as the last step in `install` step to automatically +# NOTE: Call `sum reboot` as the last step in `install` step to automatically # invoke (pre)reboot actions. -ExecStart=${PM_LIBRARY}/../asum reboot +ExecStart=${PM_LIBRARY}/../sum reboot diff --git a/sdk/.gitignore b/sdk/.gitignore index b5b41d7..9229287 100644 --- a/sdk/.gitignore +++ b/sdk/.gitignore @@ -1,2 +1 @@ -asum -pm \ No newline at end of file +sum diff --git a/sdk/Makefile b/sdk/Makefile index 46624bb..87b3105 100644 --- a/sdk/Makefile +++ b/sdk/Makefile @@ -1,14 +1,14 @@ # Copyright (c) 2021 Veritas Technologies LLC. All rights reserved IP63-2828-7171-04-15-9. -# ASUM_RPM_FORMAT_VERSION is like a protocol version. -# This will be used by the ASUM binary to list the contents appropriately. +# RPM_FORMAT_VERSION is like a protocol version. +# This will be used by the SUM binary to list the contents appropriately. # This version needs to be updated, only if there is format/layout changes -# that breaks backward compatibility w.r.t. parsing RPM info by the `asum` +# that breaks backward compatibility w.r.t. parsing RPM info by the `sum` # binary. -ASUM_RPM_FORMAT_VERSION=2 +RPM_FORMAT_VERSION=2 # Jenking build options -PRODUCT?=asum +PRODUCT?=sum VERSION?=1.0 BRANCH?=main @@ -32,8 +32,8 @@ PLUGINS_LIBRARY?=$(PACKAGE_DIR)/library PLUGIN_TYPES?="preinstall install prereboot postreboot rollback-precheck prerollback rollback commit-precheck commit" RPM_PRE_SCRIPT?=$(RPM_SCRIPTS_DIR)/rpm_pre.sh -RPM_SPEC_FILE=asumrpm.spec -RPM_INST_FILE=asumrpm.inst +RPM_SPEC_FILE=sumrpm.spec +RPM_INST_FILE=sumrpm.inst TARGET_DIR=system/upgrade/repository/update/$(RPM_NAME)-$(RPM_VERSION)-$(RPM_RELEASE) @@ -50,7 +50,7 @@ all: generate # required for updating the system. .PHONY: generate generate: - @echo ===== Generating ASUM RPM ===== + @echo ===== Generating SUM RPM ===== @echo Creating spec and inst files. if [ ! 
-f $(RPM_INFO_FILE) ] ; then \ echo "RPM Info JSON file does not exist at $(RPM_INFO_FILE)." ; \ @@ -76,7 +76,7 @@ generate: -e "s%__VERSION__%$(RPM_VERSION)%g" \ -e "s|__RELEASE__|$(RPM_RELEASE)|g" \ -e "s%__URL__%$(RPM_URL)%g" \ - -e "s%__ASUM_RPM_FORMAT_VERSION__%$(ASUM_RPM_FORMAT_VERSION)%g" \ + -e "s%__RPM_FORMAT_VERSION__%$(RPM_FORMAT_VERSION)%g" \ -e "s%__RPM_INFO__%$${rpm_desc}%g" \ $(PACKAGE_DIR)/SPECS/$(RPM_SPEC_FILE); @@ -95,7 +95,7 @@ generate: 7z x $(ISO_PATH) -o$(PLUGINS_LIBRARY)/$${iso_target_dir}/ -y; \ fi ; \ - @echo ======== Adding ASUM scripts and plugins for version $(RPM_VERSION) for RPM packaging. + @echo ======== Adding SUM scripts and plugins for version $(RPM_VERSION) for RPM packaging. cp -v ${RPM_SCRIPTS_DIR}/* $(PACKAGE_DIR)/$(TARGET_DIR) cp -Rv $(PLUGINS_LIBRARY) $(PACKAGE_DIR)/$(TARGET_DIR) @@ -105,7 +105,7 @@ generate: pluginTypes=$(PLUGIN_TYPES); \ for pt in $${pluginTypes}; do \ echo "Generating image for $${pt}..."; \ - $(TOP)/scripts/asum pm list -library $(PLUGINS_LIBRARY) -type $${pt} -log-dir $(PLUGINS_LIBRARY)/../imgs/ -log-file $${pt}; \ + $(TOP)/scripts/sum pm list -library $(PLUGINS_LIBRARY) -type $${pt} -log-dir $(PLUGINS_LIBRARY)/../imgs/ -log-file $${pt}; \ done; @echo ======== Keeping only plugin graph images and cleaning logs in image dir. @@ -135,7 +135,7 @@ generate: done; \ git add $${readme} $(PLUGINS_LIBRARY)/../imgs/*.svg; - @echo ======== Calling mkrpm.sh utility to generate the ASUM RPM. + @echo ======== Calling mkrpm.sh utility to generate the SUM RPM. cd $(PACKAGE_DIR); \ if [ -n "$(SHIP_DIR)" ]; then \ export PLATFORM_RPM_LOCATION=$(SHIP_DIR); \ @@ -145,7 +145,7 @@ generate: .PHONY: clean clean: - @echo ======== Removing ASUM RPM artifacts. + @echo ======== Removing SUM RPM artifacts. 
-rm -rf $(PACKAGE_DIR) diff --git a/sdk/README.md b/sdk/README.md index 4aa5337..ded45c5 100644 --- a/sdk/README.md +++ b/sdk/README.md @@ -1,13 +1,14 @@ -# ASUM SDK +# SUM SDK ## Update RPM Generation -An update RPM should be ASUM format compliant in order for one to successfully install the update. In order to generate such an RPM, one should use the [ASUM SDK](../sdk/Makefile) along with providing appropriate [rpm information](./rpm-info.json). +An update RPM should be SUM format compliant in order for one to successfully install the update. In order to generate such an RPM, one should use the [SUM SDK](../sdk/Makefile) along with providing appropriate [rpm information](./rpm-info.json). ### Prerequisites To use this framework, you need the following: -1. ASUM SDK + +1. SUM SDK 2. JSON file containing RPM information. Ex: [rpm-info.json](./rpm-info.json). 3. Library of plugin folders containing plugins. 4. Optionally ISO can be specified through `ISO_PATH` to include ISO contents into the RPM. 
@@ -15,10 +16,10 @@ To use this framework, you need the following: ### Usage -Download and extract the ASUM SDK, and call the `make` target as below by specifying appropriate parameters as shown below: +Download and extract the SUM SDK, and call the `make` target as below by specifying appropriate parameters as shown below: ```bash -$(MAKE) -C $(ASUM_SDK_PATH) generate \ +$(MAKE) -C $(SUM_SDK_PATH) generate \ ISO_PATH=$${Path_to_ISO} \ PLUGINS_LIBRARY=$${PluginsLibraryPath} \ RPM_NAME=$${RPM_Name} \ diff --git a/sdk/asumrpm.spec b/sdk/asumrpm.spec index bbe7d3f..03160a0 100644 --- a/sdk/asumrpm.spec +++ b/sdk/asumrpm.spec @@ -13,7 +13,7 @@ BuildArch: x86_64 Packager: Appliance Solutions %description -ASUM RPM Format Version : __ASUM_RPM_FORMAT_VERSION__ +RPM Format Version : __RPM_FORMAT_VERSION__ RPM Info : __RPM_INFO__ %define debug_package %{nil} diff --git a/sdk/scripts/commit b/sdk/scripts/commit index cb5373d..ef1bbcb 100755 --- a/sdk/scripts/commit +++ b/sdk/scripts/commit @@ -5,7 +5,7 @@ myprg=$0 myDir=$(dirname ${myprg}) echo "Committing the update..." -${myDir}/asum commit "$@" +${myDir}/sum commit "$@" if [ $? -ne 0 ]; then echo "Error: Failed to commit the update." exit 1 diff --git a/sdk/scripts/install b/sdk/scripts/install index ce87f23..a665f97 100755 --- a/sdk/scripts/install +++ b/sdk/scripts/install @@ -19,7 +19,7 @@ fi export VXAPP_UPGRADE_ROOT="/system/upgrade/volume/" echo "Installing the update..." -${myDir}/asum install "$@" +${myDir}/sum install "$@" if [ $? -ne 0 ]; then echo "Error: Failed to install the update." exit 1 diff --git a/sdk/scripts/reboot b/sdk/scripts/reboot index dc4d2f6..a081d3b 100755 --- a/sdk/scripts/reboot +++ b/sdk/scripts/reboot @@ -9,7 +9,7 @@ myDir=$(dirname ${myprg}) export VXAPP_UPGRADE_ROOT="/system/upgrade/volume/" echo "Restarting the node to continue with the update installation..." -${myDir}/asum reboot "$@" +${myDir}/sum reboot "$@" if [ $? -ne 0 ]; then echo "Error: Failed to restart the node." 
exit 1 diff --git a/sdk/scripts/rollback b/sdk/scripts/rollback index 4c03e58..ca96fbd 100755 --- a/sdk/scripts/rollback +++ b/sdk/scripts/rollback @@ -6,13 +6,13 @@ myDir=$(dirname ${myprg}) echo "Rolling back the update..." # NOTE: -# 1. The rollback plugins would now be called as part of the `asum install` +# 1. The rollback plugins would now be called as part of the `sum install` # itself. # 2. This `rollback` script (i.e., ${myprg}) would be called through rollback # REST API soon to do rollback-precheck, reboot & then rollback. Till # then, this is just a noop i.e., exit with 0. - ${myDir}/asum rollback "$@" + ${myDir}/sum rollback "$@" if [ $? -ne 0 ]; then echo "Error: Failed to roll back the update." exit 1 diff --git a/update/update.go b/update/update.go index b3cfb0f..72bdb0a 100644 --- a/update/update.go +++ b/update/update.go @@ -29,10 +29,10 @@ const ( // Status is the execution/run status of PM on a specified plugin type. type Status struct { // INFO: The Status contains info of all operations so as to support - // all operations with one call to asum with appropriate flags set to + // all operations with one call to sum with appropriate flags set to // continue onto the next operation. // Ex: Install can receive auto-reboot=true, in which after installation - // is completed successfully, `asum` will run reboot operation. + // is completed successfully, `sum` will run reboot operation. Install []pm.RunStatus `yaml:",omitempty"` Reboot []pm.RunStatus `yaml:",omitempty"` Rollback []pm.RunStatus `yaml:",omitempty"` @@ -149,7 +149,7 @@ func runCmdFromRPM(action, swName, swType string, params map[string]string) erro rpmInfo := listInfo[0] // INFO: Only for `install` action, we need to first install/extract the - // ASUM RPM to get the install script. For all other actions, the + // SUM RPM to get the install script. For all other actions, the // install script is expected to run first, and hence they're // expected to be present at the scripts location. 
if "install" == action { @@ -370,7 +370,7 @@ func RegisterCommandOptions(progname string) { // Input: // 1. map[string]interface{} // where, the options could be following: -// "progname": Name of the program along with any cmds (ex: asum pm) +// "progname": Name of the program along with any cmds (ex: sum pm) // "cmd-index": Index to the cmd (ex: run) func ScanCommandOptions(options map[string]interface{}) error { log.Printf("Entering ScanCommandOptions(%+v)...", options) diff --git a/utils/rpm/rpm.go b/utils/rpm/rpm.go index 0d526ef..f956a84 100644 --- a/utils/rpm/rpm.go +++ b/utils/rpm/rpm.go @@ -1,6 +1,6 @@ // Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 -// Package rpm contains utility functions (required by ASUM) for managing RPM files. +// Package rpm contains utility functions (required by SUM) for managing RPM files. package rpm import ( diff --git a/utils/rpm/rpm_test.go b/utils/rpm/rpm_test.go index 17e7883..1516f4b 100644 --- a/utils/rpm/rpm_test.go +++ b/utils/rpm/rpm_test.go @@ -87,30 +87,30 @@ Packager : Veritas AS DevOps URL : https://www.veritas.com/support/en_US.html Summary : Sample Update RPM created using ASUM SDK Description : -ASUM RPM Format Version : 2 +RPM Format Version : 2 RPM Info : {"Description":["Sample multi-line description of the update RPM.","Client scripts/GUI can display each item in this list in a separate para with appropriate linebreaks."],"Type":"Update","VersionInfo":[{"Version":"*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"supports-rollback":false,"estimated-minutes":35},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped 
instances."],"requires-restart":true,"estimated-minutes":20},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back.","Also, committing node restarts some services."],"estimated-minutes":5}},{"Version":"3.*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display warning messages and instructions like stopping instances."],"requires-restart":false,"supports-rollback":false,"estimated-minutes":25},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like roll back requires restarting node, ","(even though install didn't require restart), as snapshot needs to be reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back."],"estimated-minutes":5}}]} `, }, want: map[string]string{ - "ASUM RPM Format Version": "2", - "Architecture": "x86_64", - "Build Date": "Wed 22 Jul 2020 05:17:50 PM PDT", - "Build Host": "builder", - "Description": "", - "Group": "Unspecified", - "Install Date": "(not installed)", - "License": "Copyright (c) 2020 Veritas Technologies LLC. 
All rights reserved.", - "Name": "VRTSasum-update", - "Packager": "Veritas AS DevOps ", - "RPM Info": `{"Description":["Sample multi-line description of the update RPM.","Client scripts/GUI can display each item in this list in a separate para with appropriate linebreaks."],"Type":"Update","VersionInfo":[{"Version":"*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"supports-rollback":false,"estimated-minutes":35},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"estimated-minutes":20},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back.","Also, committing node restarts some services."],"estimated-minutes":5}},{"Version":"3.*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display warning messages and instructions like stopping instances."],"requires-restart":false,"supports-rollback":false,"estimated-minutes":25},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like roll back requires restarting node, ","(even though install didn't require restart), as snapshot needs to be reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back."],"estimated-minutes":5}}]}`, - "Release": "20200723001743", - "Relocations": "(not relocatable)", - "Signature": "RSA/SHA256, Mon 06 Jul 2020 02:41:26 PM PDT, Key ID cf784714d9712e70", - "Size": "6098439", - 
"Source RPM": "VRTSasum-update-2.0.1-20200723001743.src.rpm", - "Summary": "Sample Update RPM created using ASUM SDK", - "URL": "https://www.veritas.com/support/en_US.html", - "Version": "2.0.1", + "RPM Format Version": "2", + "Architecture": "x86_64", + "Build Date": "Wed 22 Jul 2020 05:17:50 PM PDT", + "Build Host": "builder", + "Description": "", + "Group": "Unspecified", + "Install Date": "(not installed)", + "License": "Copyright (c) 2020 Veritas Technologies LLC. All rights reserved.", + "Name": "VRTSasum-update", + "Packager": "Veritas AS DevOps ", + "RPM Info": `{"Description":["Sample multi-line description of the update RPM.","Client scripts/GUI can display each item in this list in a separate para with appropriate linebreaks."],"Type":"Update","VersionInfo":[{"Version":"*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"supports-rollback":false,"estimated-minutes":35},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"estimated-minutes":20},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back.","Also, committing node restarts some services."],"estimated-minutes":5}},{"Version":"3.*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display warning messages and instructions like stopping instances."],"requires-restart":false,"supports-rollback":false,"estimated-minutes":25},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like roll back requires restarting node, ","(even though install didn't require 
restart), as snapshot needs to be reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back."],"estimated-minutes":5}}]}`, + "Release": "20200723001743", + "Relocations": "(not relocatable)", + "Signature": "RSA/SHA256, Mon 06 Jul 2020 02:41:26 PM PDT, Key ID cf784714d9712e70", + "Size": "6098439", + "Source RPM": "VRTSasum-update-2.0.1-20200723001743.src.rpm", + "Summary": "Sample Update RPM created using ASUM SDK", + "URL": "https://www.veritas.com/support/en_US.html", + "Version": "2.0.1", }, }, } From 84a51c23e4bc6e8680824dc7eb72f217f71446fc Mon Sep 17 00:00:00 2001 From: "Abhijith D.A." Date: Wed, 27 Nov 2024 23:14:47 -0800 Subject: [PATCH 10/10] Sync-up: syslog; pm run-time; (#9) --- boot/main.go | 144 +++ boot/main_test.go | 165 +++ boot/test_files/etc/vxos-release | 2 + .../system/upgrade/volume/etc/vxos-release | 2 + cmd/sum/sum.go | 40 +- go.mod | 4 +- go.sum | 6 +- repo/add.go | 42 +- repo/list.go | 204 ++-- repo/list_test.go | 117 +- repo/remove.go | 40 +- repo/remove_test.go | 12 +- repo/repo.go | 46 +- repo/test_files/default-response.yaml | 29 + .../test_files/include-checksum-response.yaml | 30 + repo/test_files/valid-rpm-metadata.txt | 20 + update/update.go | 163 ++- utils/rpm/rpm.go | 73 +- utils/rpm/rpm_test.go | 57 +- validate/validate.go | 88 +- validate/validate_test.go | 160 ++- validate/version/version.go | 52 +- validate/version/version_test.go | 9 +- .../VeritasOS/plugin-manager/.gitignore | 15 + .../VeritasOS/plugin-manager/LICENSE | 21 + .../VeritasOS/plugin-manager/Makefile | 124 ++ .../VeritasOS/plugin-manager/Makefile.conf | 3 + .../VeritasOS/plugin-manager/README.md | 447 +++++++ .../VeritasOS/plugin-manager/config/config.go | 182 +++ .../VeritasOS/plugin-manager/go.mod | 5 + .../VeritasOS/plugin-manager/go.sum | 4 + .../VeritasOS/plugin-manager/graph/graph.go | 
194 +++ .../VeritasOS/plugin-manager/plugin.go | 1035 +++++++++++++++++ .../plugin-manager/types/runtime/runtime.go | 12 + .../plugin-manager/types/status/status.go | 15 + .../VeritasOS/plugin-manager/types/types.go | 24 + .../VeritasOS/plugin-manager/utils/log/log.go | 567 +++++++++ .../VeritasOS/plugin-manager/utils/os/os.go | 70 ++ .../plugin-manager/utils/output/output.go | 112 ++ vendor/gopkg.in/yaml.v2/.travis.yml | 17 - vendor/gopkg.in/yaml.v2/LICENSE | 201 ---- vendor/gopkg.in/yaml.v2/encode.go | 390 ------- vendor/gopkg.in/yaml.v2/go.mod | 5 - vendor/gopkg.in/yaml.v2/writerc.go | 26 - .../LICENSE.libyaml => yaml.v3/LICENSE} | 39 +- vendor/gopkg.in/{yaml.v2 => yaml.v3}/NOTICE | 0 .../gopkg.in/{yaml.v2 => yaml.v3}/README.md | 31 +- vendor/gopkg.in/{yaml.v2 => yaml.v3}/apic.go | 61 +- .../gopkg.in/{yaml.v2 => yaml.v3}/decode.go | 629 ++++++---- .../gopkg.in/{yaml.v2 => yaml.v3}/emitterc.go | 413 ++++++- vendor/gopkg.in/yaml.v3/encode.go | 577 +++++++++ vendor/gopkg.in/yaml.v3/go.mod | 5 + .../gopkg.in/{yaml.v2 => yaml.v3}/parserc.go | 165 ++- .../gopkg.in/{yaml.v2 => yaml.v3}/readerc.go | 24 +- .../gopkg.in/{yaml.v2 => yaml.v3}/resolve.go | 138 ++- .../gopkg.in/{yaml.v2 => yaml.v3}/scannerc.go | 363 +++++- .../gopkg.in/{yaml.v2 => yaml.v3}/sorter.go | 25 +- vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/{yaml.v2 => yaml.v3}/yaml.go | 350 ++++-- vendor/gopkg.in/{yaml.v2 => yaml.v3}/yamlh.go | 80 +- .../{yaml.v2 => yaml.v3}/yamlprivateh.go | 37 +- vendor/modules.txt | 14 +- 62 files changed, 6472 insertions(+), 1501 deletions(-) create mode 100644 boot/main.go create mode 100644 boot/main_test.go create mode 100644 boot/test_files/etc/vxos-release create mode 100644 boot/test_files/system/upgrade/volume/etc/vxos-release create mode 100644 repo/test_files/default-response.yaml create mode 100644 repo/test_files/include-checksum-response.yaml create mode 100644 repo/test_files/valid-rpm-metadata.txt create mode 100644 
vendor/github.com/VeritasOS/plugin-manager/.gitignore create mode 100644 vendor/github.com/VeritasOS/plugin-manager/LICENSE create mode 100644 vendor/github.com/VeritasOS/plugin-manager/Makefile create mode 100644 vendor/github.com/VeritasOS/plugin-manager/Makefile.conf create mode 100644 vendor/github.com/VeritasOS/plugin-manager/README.md create mode 100644 vendor/github.com/VeritasOS/plugin-manager/config/config.go create mode 100644 vendor/github.com/VeritasOS/plugin-manager/go.mod create mode 100644 vendor/github.com/VeritasOS/plugin-manager/go.sum create mode 100644 vendor/github.com/VeritasOS/plugin-manager/graph/graph.go create mode 100644 vendor/github.com/VeritasOS/plugin-manager/plugin.go create mode 100644 vendor/github.com/VeritasOS/plugin-manager/types/runtime/runtime.go create mode 100644 vendor/github.com/VeritasOS/plugin-manager/types/status/status.go create mode 100644 vendor/github.com/VeritasOS/plugin-manager/types/types.go create mode 100644 vendor/github.com/VeritasOS/plugin-manager/utils/log/log.go create mode 100644 vendor/github.com/VeritasOS/plugin-manager/utils/os/os.go create mode 100644 vendor/github.com/VeritasOS/plugin-manager/utils/output/output.go delete mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v2/encode.go delete mode 100644 vendor/gopkg.in/yaml.v2/go.mod delete mode 100644 vendor/gopkg.in/yaml.v2/writerc.go rename vendor/gopkg.in/{yaml.v2/LICENSE.libyaml => yaml.v3/LICENSE} (51%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/NOTICE (100%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/README.md (66%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/apic.go (93%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/decode.go (51%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/emitterc.go (80%) create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/go.mod rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/parserc.go (85%) 
rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/readerc.go (91%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/resolve.go (60%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/scannerc.go (87%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/sorter.go (76%) create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/yaml.go (55%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/yamlh.go (88%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/yamlprivateh.go (78%) diff --git a/boot/main.go b/boot/main.go new file mode 100644 index 0000000..e785aad --- /dev/null +++ b/boot/main.go @@ -0,0 +1,144 @@ +/** +* Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + */ + +package boot + +import ( + "encoding/json" + "errors" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "sort" + "strings" +) + +// Variables for retrieving the version information from release file +const ( + VxosReleaseFile = "/etc/vxos-release" + VersionPattern = "product-version = " +) + +// KernelInfo stores the information for kernels +type KernelInfo struct { + Name string `json:"name"` + Version string `json:"version"` + Kernel string `json:"kernel"` + Device string `json:"device"` +} + +var ( + fExecCommand = execCommand + fGetVolumeInfo = getVolumeInfo +) + +// Execution handler for running a command on os +func execCommand(operation string, command []string) string { + out, err := exec.Command(operation, command...).Output() + if err != nil { + message := fmt.Sprintf("Failed to execute command '%s %s'. 
Error: %s.", operation, command, err) + fmt.Fprintf(os.Stderr, "%v\n", message) + } + return string(out) +} + +func execCommandBash(cmdStr string) (string, error) { + cmd := exec.Command("bash", "-c", cmdStr) + out, err := cmd.Output() + outStr := string(out) + if err != nil { + message := fmt.Sprintf("Failed to run command [%s].", cmdStr) + err = errors.New(message) + } + return outStr, err +} + +func getVersionInfo(file string) string { + content, _ := ioutil.ReadFile(file) + lines := strings.Split(string(content), "\n") + + for _, line := range lines { + if strings.Contains(line, VersionPattern) { + return strings.Split(line, VersionPattern)[1] + } + } + return "" +} + +func getVolumeInfo(volumePath string) (string, string) { + rpmCmd := "rpm -q kernel --root=" + volumePath + " --qf '%{VERSION}-%{RELEASE}.%{ARCH}\n'" + device := strings.TrimSpace(execCommand("findmnt", []string{"-n", "-o", "SOURCE", volumePath})) + kernels, err := execCommandBash(rpmCmd) + if err != nil { + return device, "" + } + return device, kernels +} + +func getKernelMap(releaseFile string, bootVolumes map[string]map[string]string, newVersion string) []KernelInfo { + kernelMap := make([]KernelInfo, 0) + runningKernel := strings.TrimSpace(fExecCommand("uname", []string{"-r"})) + + volumeNames := make([]string, 0, len(bootVolumes)) + for volumeName := range bootVolumes { + volumeNames = append(volumeNames, volumeName) + } + sort.Strings(volumeNames) + + for _, volumeName := range volumeNames { + for _, volumePath := range bootVolumes[volumeName] { + version := getVersionInfo(volumePath + releaseFile) + device, kernels := fGetVolumeInfo(volumePath) + + for _, kernel := range strings.Fields(kernels) { + kernelInfo := KernelInfo{ + Name: volumeName, + Version: version, + Kernel: kernel, + Device: device, + } + if volumeName == "current" && kernel != runningKernel { + kernelInfo.Name = "patch" + if len(newVersion) != 0 { + kernelInfo.Version = newVersion + } + } + kernelMap = append(kernelMap, 
kernelInfo) + } + } + } + return kernelMap +} + +// Exec executes boot management +func Exec(args []string) error { + os.Args = args + + prepareFlag := flag.Bool("prepare", false, "command for preparing boot info: boot -prepare -release_file= -volumes= -version=") + releaseFile := flag.String("release_file", "", "path of release file") + bootVolumes := flag.String("volumes", "", "bootable volumes") + newVersion := flag.String("version", "", "version") + + flag.Parse() + + if *prepareFlag { + if *bootVolumes == "" { + flag.PrintDefaults() + return fmt.Errorf("Bootable volumes need to be specified") + } + if *releaseFile == "" { + *releaseFile = VxosReleaseFile + } + bootVolumesMap := make(map[string]map[string]string) + json.Unmarshal([]byte(*bootVolumes), &bootVolumesMap) + kernelMap := getKernelMap(*releaseFile, bootVolumesMap, *newVersion) + kernelMapOutput, _ := json.Marshal(kernelMap) + fmt.Println(string(kernelMapOutput)) + return nil + } + flag.PrintDefaults() + return fmt.Errorf("Invalid inputs of command") +} diff --git a/boot/main_test.go b/boot/main_test.go new file mode 100644 index 0000000..3b68338 --- /dev/null +++ b/boot/main_test.go @@ -0,0 +1,165 @@ +/** +* Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 + */ + +package boot + +import ( + "fmt" + "reflect" + "testing" +) + +var ( + releaseFile string + newVersion string + pathCurrent map[string]string + pathUpgrade map[string]string + kernelCurrent string + kernelUpgrade string + kernelPatch string + deviceCurrent string + deviceUpgrade string + kernelInfoCurrent KernelInfo + kernelInfoUpgrade KernelInfo + kernelInfoPatch KernelInfo +) + +func init() { + releaseFile = "/etc/vxos-release" + newVersion = "1.4.1" + pathCurrent = map[string]string{"path": "./test_files/"} + pathUpgrade = map[string]string{"path": "./test_files/system/upgrade/volume"} + kernelCurrent = "3.10.0-1062.7.1.el7.x86_64" + kernelUpgrade = "3.10.1-1062.7.1.el7.x86_64" + kernelPatch = "3.10.0-1062.9.1.el7.x86_64" + deviceCurrent = "/dev/mapper/system-thunder_cloud_1.4--999" + deviceUpgrade = "/dev/mapper/system-thunder_cloud_2.0--999" + kernelInfoCurrent = KernelInfo{Name: "current", Version: "1.4", Kernel: kernelCurrent, Device: deviceCurrent} + kernelInfoPatch = KernelInfo{Name: "patch", Version: newVersion, Kernel: kernelPatch, Device: deviceCurrent} + kernelInfoUpgrade = KernelInfo{Name: "upgrade", Version: "2.0", Kernel: kernelUpgrade, Device: deviceUpgrade} +} + +func TestGetVersionInfo(t *testing.T) { + tests := []struct { + name string + file string + version string + }{ + { + name: "Use mocked release file", + file: "./test_files/" + releaseFile, + version: "1.4", + }, + { + name: "Use a non-existing file", + file: "./test_files/vxos-release", + version: "", + }, + { + name: "Use file without information of prodcut version", + file: "./test_files/incorrect-release", + version: "", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + version := getVersionInfo(test.file) + if version != test.version { + t.Errorf("expected: [%v], but got [%v]", test.version, version) + } + }) + } +} + +func TestGetKernelMap(t *testing.T) { + type mock struct { + device string + kernels string + } + + 
tests := []struct { + name string + mock map[string]mock + bootVolumes map[string]map[string]string + newVersion string + kernelMap []KernelInfo + }{ + { + name: "Kick Start", + mock: map[string]mock{ + pathCurrent["path"]: mock{ + device: deviceCurrent, + kernels: kernelCurrent, + }, + }, + bootVolumes: map[string]map[string]string{"current": pathCurrent}, + newVersion: "", + kernelMap: []KernelInfo{kernelInfoCurrent}, + }, + { + name: "Factory Reset", + mock: map[string]mock{ + pathCurrent["path"]: mock{ + device: deviceCurrent, + kernels: kernelCurrent, + }, + pathUpgrade["path"]: mock{ + device: deviceUpgrade, + kernels: kernelUpgrade, + }, + }, + bootVolumes: map[string]map[string]string{"upgrade": pathUpgrade}, + newVersion: "", + kernelMap: []KernelInfo{kernelInfoUpgrade}, + }, + { + name: "Upgrade", + mock: map[string]mock{ + pathCurrent["path"]: mock{ + device: deviceCurrent, + kernels: kernelCurrent, + }, + pathUpgrade["path"]: mock{ + device: deviceUpgrade, + kernels: kernelUpgrade, + }, + }, + bootVolumes: map[string]map[string]string{"current": pathCurrent, "upgrade": pathUpgrade}, + newVersion: "", + kernelMap: []KernelInfo{kernelInfoCurrent, kernelInfoUpgrade}, + }, + { + name: "Patch", + mock: map[string]mock{ + pathCurrent["path"]: mock{ + device: deviceCurrent, + kernels: kernelCurrent + "\n" + kernelPatch, + }, + pathUpgrade["path"]: mock{ + device: deviceUpgrade, + kernels: kernelUpgrade, + }, + }, + bootVolumes: map[string]map[string]string{"current": pathCurrent}, + newVersion: newVersion, + kernelMap: []KernelInfo{kernelInfoCurrent, kernelInfoPatch}, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fExecCommand = func(operation string, command []string) string { + return fmt.Sprintf("%v\n", kernelCurrent) + } + + fGetVolumeInfo = func(volumePath string) (string, string) { + return test.mock[volumePath].device, test.mock[volumePath].kernels + } + + kernelMap := getKernelMap(releaseFile, test.bootVolumes, 
test.newVersion) + if !reflect.DeepEqual(kernelMap, test.kernelMap) { + t.Errorf("expected: [%v], but got [%v]", test.kernelMap, kernelMap) + } + }) + } +} diff --git a/boot/test_files/etc/vxos-release b/boot/test_files/etc/vxos-release new file mode 100644 index 0000000..017d1a6 --- /dev/null +++ b/boot/test_files/etc/vxos-release @@ -0,0 +1,2 @@ +product-name = Flex +product-version = 1.4 diff --git a/boot/test_files/system/upgrade/volume/etc/vxos-release b/boot/test_files/system/upgrade/volume/etc/vxos-release new file mode 100644 index 0000000..f2812cb --- /dev/null +++ b/boot/test_files/system/upgrade/volume/etc/vxos-release @@ -0,0 +1,2 @@ +product-name = Flex +product-version = 2.0 diff --git a/cmd/sum/sum.go b/cmd/sum/sum.go index 05d24ba..158d73d 100644 --- a/cmd/sum/sum.go +++ b/cmd/sum/sum.go @@ -1,19 +1,20 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2022 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 package main import ( "flag" "fmt" + "os" + "path/filepath" + "strings" + pm "github.com/VeritasOS/plugin-manager" // import "../../plugin-manager" - "github.com/VeritasOS/plugin-manager/config" - logutil "github.com/VeritasOS/plugin-manager/utils/log" + logger "github.com/VeritasOS/plugin-manager/utils/log" + "github.com/VeritasOS/software-update-manager/boot" "github.com/VeritasOS/software-update-manager/repo" "github.com/VeritasOS/software-update-manager/update" "github.com/VeritasOS/software-update-manager/validate" - "os" - "path/filepath" - "strings" ) var ( @@ -35,31 +36,34 @@ var mainCmdOptions struct { } func init() { - config.SetLogDir("/var/log/sum/") + if len(os.Args) < 2 { + fmt.Fprintf(os.Stderr, "Subcommand as operation is required.\n") + os.Exit(1) + } + logger.InitLogging() } func main() { absprogpath, err := filepath.Abs(os.Args[0]) if err != nil { - logutil.PrintNLogError("Failed to get the %s path.", progname) + logger.ConsoleError.Printf("Failed 
to get the %s path.", progname) } - if len(os.Args) < 2 { - fmt.Fprintf(os.Stderr, "Subcommand as operation is required.\n") - os.Exit(1) - } - - cmd := os.Args[1] - config.SetLogFile(cmd) - logutil.SetLogging(config.GetLogDir() + config.GetLogFile()) - mainRegisterCmdOptions() pm.RegisterCommandOptions(progname + " pm") repo.RegisterCommandOptions(progname + " repo") update.RegisterCommandOptions(progname) + cmd := os.Args[1] switch cmd { case "version": - logutil.PrintNLog("%s version %s %s\n", progname, version, buildDate) + logger.ConsoleInfo.Printf("%s version %s %s", progname, version, buildDate) + + case "boot": + err := boot.Exec(os.Args[1:]) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to execute boot command: %v.\n", err.Error()) + os.Exit(1) + } case "commit", "install", "reboot", "rollback": library := filepath.Clean( diff --git a/go.mod b/go.mod index 562a351..efbd3bb 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,6 @@ go 1.12 // replace plugin-manager => ../plugin-manager/ require ( - github.com/VeritasOS/plugin-manager v1.0.1 - gopkg.in/yaml.v2 v2.4.0 + github.com/VeritasOS/plugin-manager v1.0.4 + gopkg.in/yaml.v3 v3.0.1 ) diff --git a/go.sum b/go.sum index dd0bc19..26768e5 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,6 @@ +github.com/VeritasOS/plugin-manager v1.0.4 h1:d2nWuI/a7Q5cDJlkadR138wa2WIwQn47J1zJUFYQZX0= +github.com/VeritasOS/plugin-manager v1.0.4/go.mod h1:Fw92eqykT0patTJNxHsfcUcBzzvTaqxUPPhVEcg+Y8o= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/repo/add.go 
b/repo/add.go index 4868cad..812bdd0 100644 --- a/repo/add.go +++ b/repo/add.go @@ -1,37 +1,39 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2022 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 // Package repo defines software repository functions like listing, removing -// packages from software repository. +// +// packages from software repository. package repo import ( "flag" - "github.com/VeritasOS/plugin-manager/config" - logutil "github.com/VeritasOS/plugin-manager/utils/log" - osutils "github.com/VeritasOS/plugin-manager/utils/os" - "log" "os" "os/exec" "path/filepath" "strings" + + "github.com/VeritasOS/plugin-manager/config" + logger "github.com/VeritasOS/plugin-manager/utils/log" + osutils "github.com/VeritasOS/plugin-manager/utils/os" ) // Add the software file present in the staging area to the software repo after -// validation. +// +// validation. func Add(rpmPath string, params map[string]string) error { - log.Printf("Entering repo::Add(%v, %v)", rpmPath, params) - defer log.Println("Exiting repo::Add") + logger.Debug.Printf("Entering repo::Add(%v, %v)", rpmPath, params) + defer logger.Debug.Println("Exiting repo::Add") productVersion := params["productVersion"] swRepo := params["softwareRepo"] fi, err := os.Stat(rpmPath) if err != nil { - return logutil.PrintNLogError( + return logger.ConsoleError.PrintNReturnError( "Unable to stat on %s software. Error: %s\n", rpmPath, err.Error()) } else if fi.IsDir() { - return logutil.PrintNLogError( + return logger.ConsoleError.PrintNReturnError( "%s is not a valid software.\n", rpmPath) } @@ -44,14 +46,14 @@ func Add(rpmPath string, params map[string]string) error { // As listing was done for one RPM, the list is expected to have just one RPM. 
rpmType := info[0].GetRPMType() if "" == rpmType { - return logutil.PrintNLogError("Failed to determine the software type of the %s file.", + return logger.ConsoleError.PrintNReturnError("Failed to determine the software type of the %s file.", rpmPath) } rpmType = strings.ToLower(rpmType) repoTypeLocation := filepath.FromSlash(swRepo + "/" + rpmType) if err := osutils.OsMkdirAll(repoTypeLocation, 0755); nil != err { - return logutil.PrintNLogError("Failed to create the plugins logs directory: %s. "+ + return logger.ConsoleError.PrintNReturnError("Failed to create the plugins logs directory: %s. "+ "Error: %s", config.GetPluginsLogDir(), err.Error()) } @@ -59,22 +61,22 @@ func Add(rpmPath string, params map[string]string) error { cmdParams := []string{"-f", rpmPath, repoTypeLocation} cmd := exec.Command(os.ExpandEnv(cmdStr), cmdParams...) stdOutErr, err := cmd.CombinedOutput() - log.Println("Stdout & Stderr:", string(stdOutErr)) + logger.Debug.Println("Stdout & Stderr:", string(stdOutErr)) if err != nil { - log.Printf("Failed to move %s RPM to software update repository %s. Error: %s\n", + logger.Error.Printf("Failed to move %s RPM to software update repository %s, err=%s", rpmPath, repoTypeLocation, err.Error()) - return logutil.PrintNLogError("Failed to add %s software to "+ + return logger.ConsoleError.PrintNReturnError("Failed to add %s software to "+ "software repository.", rpmPath) } return nil } -// registerCommandAdd registers the add command that enables one to -// add the RPM to the software update repository. +// registerCommandAdd registers the add command that enables one to +// add the RPM to the software update repository. 
func registerCommandAdd(progname string) { - log.Printf("Entering repo::registerCommandAdd(%s)", progname) - defer log.Println("Exiting repo::registerCommandAdd") + logger.Debug.Printf("Entering repo::registerCommandAdd(%s)", progname) + defer logger.Debug.Println("Exiting repo::registerCommandAdd") cmdOptions.addCmd = flag.NewFlagSet(progname+" add", flag.PanicOnError) diff --git a/repo/list.go b/repo/list.go index ae682f7..82a9378 100644 --- a/repo/list.go +++ b/repo/list.go @@ -1,29 +1,39 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 // Package repo defines software repository functions like listing, removing -// packages from software repository. +// packages from software repository. package repo import ( + "crypto/sha256" + "encoding/hex" "flag" - logutil "github.com/VeritasOS/plugin-manager/utils/log" - "github.com/VeritasOS/plugin-manager/utils/output" - "github.com/VeritasOS/software-update-manager/utils/rpm" - "github.com/VeritasOS/software-update-manager/validate/version" + "io" "io/ioutil" - "log" "os" "path/filepath" "regexp" "strings" - "time" - "gopkg.in/yaml.v2" + logger "github.com/VeritasOS/plugin-manager/utils/log" + "github.com/VeritasOS/plugin-manager/utils/output" + "github.com/VeritasOS/software-update-manager/utils/rpm" + "github.com/VeritasOS/software-update-manager/validate/version" + + "gopkg.in/yaml.v3" ) -// FormatVersionName is the RPM format version string that's embedded -// into RPM used for identifying the JSON format version. -const FormatVersionName = "RPM Format Version" +// FormatVersionName is the ASUM RPM format version string that's embedded +// into RPM used for identifying the JSON format version. 
+const FormatVersionName = "ASUM RPM Format Version" + +var ( + fGetRPMPackageInfo = rpm.GetRPMPackageInfo + fGetChecksum = getChecksum + fListRepo = listRepo + fListRPMFilesInfo = ListRPMFilesInfo + fOpen = os.Open +) // RPMInfo is the list of RPM package info type RPMInfo interface { @@ -37,12 +47,13 @@ type RPMInfo interface { // Version 2 RPM Information related fields & helper functions below: // v2productVersion is the details for a given product-version from the -// version compatibility matrix/info JSON. +// version compatibility matrix/info JSON. type v2productVersion struct { Install struct { ConfirmationMessage []string `yaml:"confirmation-message"` EstimatedMinutes uint `yaml:"estimated-minutes"` RequiresRestart bool `yaml:"requires-restart"` + SupportsPrecheck bool `yaml:"supports-precheck"` SupportsRollback bool `yaml:"supports-rollback"` } `yaml:",omitempty"` Rollback struct { @@ -61,17 +72,15 @@ type v2RPMInfo struct { // Name of the RPM Name string // RPM file name - FileName string - Description []string - Type string - URL string - Version string - Release string - // NOTE: The time.Time value is getting chopped off while dumping output - // in json at ansible layer causing json unmarshal failure at consumer. - // so commenting for now. - // BuildDate time.Time - + FileName string + Description []string + Type string + URL string + Version string + Release string + Checksum string `yaml:"checksum,omitempty"` + DisplayType string `yaml:"display-type,omitempty"` + BuildDate string matchedVersion string v2productVersion `yaml:",inline"` } @@ -97,7 +106,7 @@ func (v2 v2RPMInfo) GetRPMVersion() string { } // GetMatchedVersion retrives the supported product-version from -// version compatibility matrix. +// version compatibility matrix. 
func (v2 v2RPMInfo) GetMatchedVersion() string { return v2.matchedVersion } @@ -106,7 +115,7 @@ func (v2 v2RPMInfo) GetMatchedVersion() string { // v1RPMInfo is the list of RPM package info type v1RPMInfo struct { - Description string `yaml:"description"` + Description []string `yaml:"description"` Estimate struct { Hours string `yaml:"hours"` Minutes string `yaml:"minutes"` @@ -121,6 +130,8 @@ type v1RPMInfo struct { Type string `yaml:"type"` URL string `yaml:"url"` Version string `yaml:"version"` + Checksum string `yaml:"checksum,omitempty"` + Release string `yaml:"release"` } // GetRPMName returns the name of the RPM. @@ -130,8 +141,7 @@ func (v1 v1RPMInfo) GetRPMName() string { // GetRPMRelease returns the release number of the RPM. func (v1 v1RPMInfo) GetRPMRelease() string { - // v1 doesn't support displaying release number, so return empty. - return "" + return v1.Release } // GetRPMType returns the type of the RPM. @@ -145,40 +155,28 @@ func (v1 v1RPMInfo) GetRPMVersion() string { } // GetMatchedVersion retrives the supported product-version from -// version compatibility matrix. +// version compatibility matrix. func (v1 v1RPMInfo) GetMatchedVersion() string { return v1.matchedVersion } -func parseDate(rawDate string) (time.Time, error) { - const dateLayout = "Mon 02 Jan 2006 03:04:05 PM MST" - t, err := time.Parse(dateLayout, rawDate) - if err != nil { - // INFO: On few systems, the build date appeared in - // ANSIC layout, so, try parsing with that. - t, err = time.Parse(time.ANSIC, rawDate) - } - if err != nil { - log.Printf("Failed to parse date: %s. Error: %s\n", rawDate, err) - } - return t, err -} - // List the packages present in the software repo along with their details. 
func List(params map[string]string) ([]RPMInfo, error) { - log.Printf("Entering repo::List(%v)", params) - defer log.Println("Exiting repo::List") + logger.Debug.Printf("Entering repo::List(%v)", params) + defer logger.Debug.Println("Exiting repo::List") productVersion := params["productVersion"] var info []RPMInfo - files, err := listRepo(params) + files, err := fListRepo(params) if err != nil { return info, err } + includeFields := params["includeFields"] + m := strings.Split(includeFields, ",") - info, err = ListRPMFilesInfo(files, productVersion) + info, err = fListRPMFilesInfo(files, productVersion, m...) if err != nil { return info, err } @@ -189,8 +187,8 @@ func List(params map[string]string) ([]RPMInfo, error) { } func listRepo(params map[string]string) ([]string, error) { - log.Printf("Entering repo::listRepo(%v)", params) - defer log.Println("Exiting repo::listRepo") + logger.Debug.Printf("Entering repo::listRepo(%v)", params) + defer logger.Debug.Println("Exiting repo::listRepo") swName := params["softwareName"] swRepo := params["softwareRepo"] @@ -205,26 +203,24 @@ func listRepo(params map[string]string) ([]string, error) { swTypes = append(swTypes, swType) } else { if _, err := os.Stat(swRepo); os.IsNotExist(err) { - logutil.PrintNLogWarning("Software repository '%s' does not exist.", - swRepo) + logger.ConsoleWarning.Printf("Software repository '%s' does not exist.", swRepo) return files, nil } dirs, err := ioutil.ReadDir(swRepo) if err != nil { - log.Printf("ioutil.ReadDir(%s); Error: %s", swRepo, err.Error()) - return files, logutil.PrintNLogError("Failed to get contents of software repository.") + logger.Error.Printf("ioutil.ReadDir(%s); err=%s", swRepo, err.Error()) + return files, logger.ConsoleError.PrintNReturnError("Failed to get contents of software repository.") } for _, dir := range dirs { curDir := filepath.FromSlash(swRepo + "/" + dir.Name()) fi, err := os.Stat(curDir) if err != nil { - log.Printf("Unable to stat on %s directory. 
Error: %s\n", - dir, err.Error()) + logger.Error.Printf("Unable to stat on %s directory. err=%s", dir, err.Error()) continue } if !fi.IsDir() { - log.Printf("%s is not a directory.\n", curDir) + logger.Error.Printf("%s is not a directory.", curDir) continue } @@ -237,15 +233,15 @@ func listRepo(params map[string]string) ([]string, error) { tfiles, err := ioutil.ReadDir(curDir) if err != nil { - log.Printf("Unable to read contents of %s directory. Error: %s\n", + logger.Error.Printf("Unable to read contents of %s directory. err=%s\n", curDir, err.Error()) } - log.Printf("%s files: %v", dir, tfiles) + logger.Debug.Printf("%s files: %v", dir, tfiles) for _, tf := range tfiles { - log.Printf("Package: %v", tf) + logger.Debug.Printf("Package: %v", tf) matched, err := regexp.MatchString("[.]rpm$", tf.Name()) if err != nil { - log.Printf("regexp.MatchString(%s, %s); Error: %s", + logger.Error.Printf("regexp.MatchString(%s, %s); err=%s", "[.]rpm", tf.Name(), err.Error()) continue } @@ -264,41 +260,79 @@ func listRepo(params map[string]string) ([]string, error) { return files, nil } +func getChecksum(src io.Reader) string { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + logger.ConsoleError.PrintNReturnError("Failed to get RPM file checksum. Error: [%v]", err.Error()) + } + return hex.EncodeToString(h.Sum(nil)) + +} + // ListRPMFilesInfo lists the info of the RPM files. -func ListRPMFilesInfo(files []string, productVersion string) ([]RPMInfo, error) { - log.Printf("Entering repo::ListRPMFilesInfo(%v, %v)", files, productVersion) - defer log.Println("Exiting repo::ListRPMFilesInfo") +// includeFields is a comma separated values you can provide to include extra fields other than default fields. +// As of now only "checksum" is extra field you can populate. +// If you are adding new field please make sure it should not be populated by default +// you need to use --include-fields flag. 
Other wise +// if you add new field in response then response time of ListRPMFilesInfo function may increase +func ListRPMFilesInfo(files []string, productVersion string, includeFields ...string) ([]RPMInfo, error) { + logger.Debug.Printf("Entering repo::ListRPMFilesInfo(%v, %v)", files, productVersion) + defer logger.Debug.Println("Exiting repo::ListRPMFilesInfo") var info []RPMInfo for _, file := range files { - metaData, err := rpm.GetRPMPackageInfo(filepath.FromSlash(file)) + metaData, err := fGetRPMPackageInfo(filepath.FromSlash(file)) if err != nil { - return info, logutil.PrintNLogError("Failed to get software details.") + return info, logger.ConsoleError.PrintNReturnError("Failed to get software details.") } parsedData := rpm.ParseMetaData(string(metaData)) + rpmName, err := rpm.GetRPMPackageName(filepath.FromSlash(file)) + if err != nil { + logger.ConsoleError.PrintNReturnError("Failed to get software name.") + rpmName = filepath.Base(file) + } + + var checksum string + for _, v := range includeFields { + if v == "checksum" { + f, err := fOpen(file) + if err != nil { + logger.ConsoleError.PrintNReturnError("Failed to get RPM file checksum. 
Error: [%v]", err.Error()) + } + checksum = fGetChecksum(f) + + defer f.Close() + } + } + if _, ok := parsedData[FormatVersionName]; !ok { listData := v1RPMInfo{ - Description: parsedData["Description"], + Description: []string{parsedData["Description"]}, FileName: filepath.Base(file), - Name: filepath.Base(file), + Name: rpmName, Summary: parsedData["Summary"], Type: parsedData["Type"], URL: parsedData["URL"], Version: parsedData["Version"], Reboot: "n/a", + Release: parsedData["Release"], } + listData.Estimate.Hours = "0" listData.Estimate.Minutes = "0" listData.Estimate.Seconds = "0" + + listData.Checksum = checksum if "" != productVersion { versionInfo, err := version.GetCompatibileVersionInfo(productVersion, parsedData["VersionInfo"]) //In case of error, i.e the version of rpm and product version is not compatible we ignore error and contiune execution //This error is expected when the rpm is already applied. List API should still return rpm details if err != nil { - log.Printf("Error in GetCompatibileVersionInfo::(%s)...", - err) + logger.Error.Printf("Error in GetCompatibileVersionInfo, err=%v", err) } + + listData.Description = append(listData.Description, versionInfo.Description...) 
listData.matchedVersion = versionInfo.Version listData.Reboot = versionInfo.Reboot listData.Estimate.Hours = versionInfo.Estimate.Hours @@ -308,7 +342,7 @@ func ListRPMFilesInfo(files []string, productVersion string) ([]RPMInfo, error) info = append(info, listData) } else { - log.Printf("%s: %v", FormatVersionName, parsedData[FormatVersionName]) + logger.Debug.Printf("%s: %v", FormatVersionName, parsedData[FormatVersionName]) listData := v2RPMInfo{ FileName: filepath.Base(file), @@ -320,34 +354,32 @@ func ListRPMFilesInfo(files []string, productVersion string) ([]RPMInfo, error) rpmInfo := parsedData["RPM Info"] err := yaml.Unmarshal([]byte(rpmInfo), &listData) if err != nil { - log.Printf("yaml.Unmarshal(%s, %+v); Error: %s", + logger.Error.Printf("yaml.Unmarshal(%s, %+v); err=%s", rpmInfo, &listData, err.Error()) } - t, err := parseDate(parsedData["Build Date"]) - if err != nil { - log.Printf("Build date: %+v\n", t) - // listData.BuildDate = t - } + listData.BuildDate = parsedData["Build Date"] + listData.Checksum = checksum if "" != productVersion { allVersionsInfo := struct { VersionInfo []struct { Version string `yaml:"product-version"` v2productVersion `yaml:",inline"` + Description []string `yaml:"description"` } `yaml:"compatibility-info"` }{} err := yaml.Unmarshal([]byte(rpmInfo), &allVersionsInfo) if err != nil { - log.Printf("yaml.Unmarshal(%s, %+v); Error: %s", + logger.Error.Printf("yaml.Unmarshal(%s, %+v); err=%s", rpmInfo, &allVersionsInfo, err.Error()) } - // INFO: First check Version as-is, // if there is no match, then do pattern comparison. for _, vInfo := range allVersionsInfo.VersionInfo { if productVersion == vInfo.Version { listData.v2productVersion = vInfo.v2productVersion listData.matchedVersion = vInfo.Version + listData.Description = append(listData.Description, vInfo.Description...) 
} } if listData.matchedVersion == "" { @@ -367,11 +399,11 @@ func ListRPMFilesInfo(files []string, productVersion string) ([]RPMInfo, error) return info, nil } -// registerCommandList registers the list command that enables one to -// view the RPMs present in the software update repository. +// registerCommandList registers the list command that enables one to +// view the RPMs present in the software update repository. func registerCommandList(progname string) { - log.Printf("Entering repo::registerCommandList(%s)", progname) - defer log.Println("Exiting repo::registerCommandList") + logger.Debug.Printf("Entering repo::registerCommandList(%s)", progname) + defer logger.Debug.Println("Exiting repo::registerCommandList") cmdOptions.listCmd = flag.NewFlagSet(progname+" list", flag.PanicOnError) @@ -400,6 +432,14 @@ func registerCommandList(progname string) { "", "Type of the software.", ) + cmdOptions.listCmd.StringVar( + &cmdOptions.includeFields, + "include-fields", + "", + "In -include-fields comma separated values can be define. "+ + "By default checksum field is not populate in response."+ + "By using -include-fields='checksum' flag it will get populated in response", + ) output.RegisterCommandOptions(cmdOptions.listCmd, map[string]string{"output-format": "yaml"}) } diff --git a/repo/list_test.go b/repo/list_test.go index 800bb46..aebeadc 100644 --- a/repo/list_test.go +++ b/repo/list_test.go @@ -1,13 +1,20 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 // Package repo defines software repository functions like listing, removing -// packages from software repository. +// packages from software repository. 
package repo import ( + "io" + "io/ioutil" + "log" + "os" "reflect" "testing" - "time" + + logger "github.com/VeritasOS/plugin-manager/utils/log" + + "gopkg.in/yaml.v3" ) func TestList(t *testing.T) { @@ -36,43 +43,115 @@ func TestList(t *testing.T) { } } -func Test_parseDate(t *testing.T) { - wantT1, _ := time.Parse("Mon 02 Jan 2006 03:04:05 PM MST", - "Wed 06 Jan 2021 04:48:21 PM PST") - wantT2, _ := time.Parse(time.ANSIC, "Thu Feb 4 22:08:16 2021") +func readFile(filename string) ([]byte, error) { + file, err := os.Open(filename) + if err != nil { + log.Fatal(err) + } + defer func() { + if err = file.Close(); err != nil { + log.Fatal(err) + } + }() + + return ioutil.ReadAll(file) +} + +// TODO: Fix this test +func _TestListRPMFilesInfo(t *testing.T) { + logger.InitFileLogger("test.log", "INFO") + type args struct { - rawDate string + files []string + productVersion string + includeFields string + metadataFilePath string } tests := []struct { name string args args - want time.Time + want string wantErr bool }{ { - name: "Build date layout 1", + name: "should return checksum alongwith default attributes", + args: args{ + files: []string{"VRTSflex-update-3.2.rpm"}, + productVersion: "2.1", + includeFields: "checksum", + metadataFilePath: "./test_files/valid-rpm-metadata.txt", + }, + want: "./test_files/include-checksum-response.yaml", + }, + { + name: "Should return default fields if --include-fields not provided", args: args{ - rawDate: "Wed 06 Jan 2021 04:48:21 PM PST", + files: []string{"VRTSflex-update-3.2.rpm"}, + productVersion: "2.1", + includeFields: "", + metadataFilePath: "./test_files/valid-rpm-metadata.txt", }, - want: wantT1, + want: "./test_files/default-response.yaml", }, { - name: "Build date layout 2", + name: "Should return default fields if invalid --include-fields provided", args: args{ - rawDate: "Thu Feb 4 22:08:16 2021", + files: []string{"VRTSflex-update-3.2.rpm"}, + productVersion: "2.1", + includeFields: "asas", + metadataFilePath: 
"./test_files/valid-rpm-metadata.txt", }, - want: wantT2, + want: "./test_files/default-response.yaml", }, } + + originalGetChecksum := fGetChecksum + originalfGetRPMPackageInfo := fGetRPMPackageInfo + defer func() { + fGetChecksum = originalGetChecksum + fOpen = os.Open + fGetRPMPackageInfo = originalfGetRPMPackageInfo + }() + fGetChecksum = func(src io.Reader) string { + return "random_checksum" + } + fOpen = func(name string) (*os.File, error) { + return nil, nil + } for _, tt := range tests { + fGetRPMPackageInfo = func(rpmPath string) ([]byte, error) { + return readFile(tt.args.metadataFilePath) + } t.Run(tt.name, func(t *testing.T) { - got, err := parseDate(tt.args.rawDate) + got, err := ListRPMFilesInfo(tt.args.files, tt.args.productVersion, tt.args.includeFields) if (err != nil) != tt.wantErr { - t.Errorf("parseDate() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("List() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("parseDate() = %v, want %v", got, tt.want) + + jsonOut, err := yaml.Marshal(got) + if err != nil { + panic(err) + } + expectedByte, _ := readFile(tt.want) + + ex := []byte{} + g := []byte{} + counter := -1 + + for i, b := range expectedByte { + if string(jsonOut[i]) != string(b) { + ex = append(ex, jsonOut[i]) + g = append(g, b) + counter = 0 + } else { + if counter == 0 && len(g) != 0 { + t.Errorf("Got = %v, expected %v", string(g), string(ex)) + } + ex = ex[:0] + g = g[:0] + counter = -1 + } } }) } diff --git a/repo/remove.go b/repo/remove.go index 75d1846..fc7805b 100644 --- a/repo/remove.go +++ b/repo/remove.go @@ -1,24 +1,26 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2022 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 // Package repo defines software repository functions like listing, removing -// packages from software repository. +// +// packages from software repository. 
package repo import ( "flag" - logutil "github.com/VeritasOS/plugin-manager/utils/log" - osutils "github.com/VeritasOS/plugin-manager/utils/os" - "log" "os" "path/filepath" "strings" + + logger "github.com/VeritasOS/plugin-manager/utils/log" + osutils "github.com/VeritasOS/plugin-manager/utils/os" ) // registerCommandRemove registers the remove command that enables one to -// remove the RPM of the specified type from the software update repository. +// +// remove the RPM of the specified type from the software update repository. func registerCommandRemove(progname string) { - log.Printf("Entering repo::registerCommandRemove(%s)", progname) - defer log.Println("Exiting repo::registerCommandRemove") + logger.Debug.Printf("Entering repo::registerCommandRemove(%s)", progname) + defer logger.Debug.Println("Exiting repo::registerCommandRemove") cmdOptions.removeCmd = flag.NewFlagSet(progname+" remove", flag.PanicOnError) cmdOptions.removeCmd.StringVar( @@ -43,16 +45,16 @@ func registerCommandRemove(progname string) { // Remove the specified software package from the software repo. func Remove(swName, swType, swRepo string) error { - log.Printf("Entering repo::Remove(%s, %s, %s)", swName, swType, swRepo) - defer log.Println("Exiting repo::Remove") + logger.Debug.Printf("Entering repo::Remove(%s, %s, %s)", swName, swType, swRepo) + defer logger.Debug.Println("Exiting repo::Remove") if swRepo == "" { - return logutil.PrintNLogError("Unable to remove %s software %s. "+ + return logger.ConsoleError.PrintNReturnError("Unable to remove %s software %s. "+ "Failed to determine software repository.", swType, swName) } if swName != "" && swType == "" { - return logutil.PrintNLogError("Invalid usage. Software type must be specified when software name is specified.") + return logger.ConsoleError.PrintNReturnError("Invalid usage. 
Software type must be specified when software name is specified.") } absSwPath := filepath.Clean(filepath.FromSlash(swRepo + @@ -61,23 +63,21 @@ func Remove(swName, swType, swRepo string) error { fi, err := os.Stat(absSwPath) if err != nil { - log.Printf("Unable to stat on %s: %+v. Error: %s\n", - absSwPath, fi, err.Error()) - return logutil.PrintNLogError("Unable to remove %s software %s. "+ + logger.Error.Printf("Unable to stat on %s: %+v, err=%s", absSwPath, fi, err.Error()) + return logger.ConsoleError.PrintNReturnError("Unable to remove %s software %s. "+ "Specified software not found.", swType, swName) } err = osutils.OsRemoveAll(absSwPath) if err != nil { - log.Printf("Unable to remove on %s. Error: %s\n", - absSwPath, err.Error()) - return logutil.PrintNLogError("Failed to remove %s software %s.", + logger.Error.Printf("Unable to remove on %s, err=%s", absSwPath, err.Error()) + return logger.ConsoleError.PrintNReturnError("Failed to remove %s software %s.", swType, swName) } - log.Printf("Successfully removed %s software", absSwPath) - logutil.PrintNLog("Successfully removed %s software %s from repository.\n", + logger.Info.Printf("Successfully removed %s software", absSwPath) + logger.ConsoleInfo.Printf("Successfully removed %s software %s from repository.", swType, swName) return nil } diff --git a/repo/remove_test.go b/repo/remove_test.go index 038d6ef..022100b 100644 --- a/repo/remove_test.go +++ b/repo/remove_test.go @@ -1,14 +1,16 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2022 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 // Package repo defines software repository functions like listing, removing -// packages from software repository. +// +// packages from software repository. 
package repo import ( - logutil "github.com/VeritasOS/plugin-manager/utils/log" "os" "strings" "testing" + + logger "github.com/VeritasOS/plugin-manager/utils/log" ) func TestRemove(t *testing.T) { @@ -63,6 +65,8 @@ func TestRemove(t *testing.T) { wantErr: false, }, } + // Set log file name to "test", so that cleaning becomes easier. + logger.InitFileLogger("test.log", "INFO") for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var err error @@ -87,7 +91,7 @@ func TestRemove(t *testing.T) { if err != nil { t.Errorf("Failed to create new file %s. Error: %s.", path, err.Error()) } - logutil.PrintNLog("New file details: %+v\n", fi.Name()) + logger.ConsoleInfo.Printf("New file details: %+v", fi.Name()) fi.Close() } } diff --git a/repo/repo.go b/repo/repo.go index 123db83..3978716 100644 --- a/repo/repo.go +++ b/repo/repo.go @@ -1,17 +1,18 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2023 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 // Package repo defines software repository functions like listing, removing -// packages from software repository. +// +// packages from software repository. package repo import ( "flag" "fmt" - logutil "github.com/VeritasOS/plugin-manager/utils/log" - "log" "os" "path/filepath" "strings" + + logger "github.com/VeritasOS/plugin-manager/utils/log" ) // SoftwareRepoPath is the Software Update Repository path. @@ -54,12 +55,16 @@ var cmdOptions struct { // outputFormat indicates the output format to write the results. // Supported formats are "json", "yaml". outputFormat string + + // includeFields indicates additional RPM attributes to be read other than + // default ones + includeFields string } // RegisterCommandOptions registers the supported commands. 
func RegisterCommandOptions(progname string) { - log.Printf("Entering repo::RegisterCommandOptions(%s)", progname) - defer log.Println("Exiting repo::RegisterCommandOptions") + logger.Debug.Printf("Entering repo::RegisterCommandOptions(%s)", progname) + defer logger.Debug.Println("Exiting repo::RegisterCommandOptions") registerCommandAdd(progname) registerCommandList(progname) @@ -68,8 +73,8 @@ func RegisterCommandOptions(progname string) { } func registerCommandVersion(progname string) { - log.Printf("Entering repo::registerCommandVersion(%s)", progname) - defer log.Println("Exiting repo::registerCommandVersion") + logger.Debug.Printf("Entering repo::registerCommandVersion(%s)", progname) + defer logger.Debug.Println("Exiting repo::registerCommandVersion") cmdOptions.versionCmd = flag.NewFlagSet(progname+" version", flag.ContinueOnError) cmdOptions.versionPtr = cmdOptions.versionCmd.Bool( @@ -82,13 +87,13 @@ func registerCommandVersion(progname string) { // ScanCommandOptions scans for the command line options and makes appropriate // function call. // Input: -// 1. map[string]interface{} -// where, the options could be following: -// "progname": Name of the program along with any cmds (ex: sum pm) -// "cmd-index": Index to the cmd (ex: run) +// 1. 
map[string]interface{} +// where, the options could be following: +// "progname": Name of the program along with any cmds (ex: asum pm) +// "cmd-index": Index to the cmd (ex: run) func ScanCommandOptions(options map[string]interface{}) error { - log.Printf("Entering ScanCommandOptions(%+v)...", options) - defer log.Println("Exiting ScanCommandOptions") + logger.Debug.Printf("Entering ScanCommandOptions(%+v)...", options) + defer logger.Debug.Println("Exiting ScanCommandOptions") progname := filepath.Base(os.Args[0]) cmdIndex := 1 @@ -99,17 +104,17 @@ func ScanCommandOptions(options map[string]interface{}) error { cmdIndex = valI.(int) } cmd := os.Args[cmdIndex] - log.Println("progname:", progname, "cmd with arguments:", os.Args[cmdIndex:]) + logger.Debug.Println("progname:", progname, "cmd with arguments:", os.Args[cmdIndex:]) var err error switch cmd { case "version": - logutil.PrintNLog("Software Repository Manager version %s\n", myVersion) + logger.ConsoleInfo.Printf("Software Repository Manager version %s", myVersion) case "add": err = cmdOptions.addCmd.Parse(os.Args[3:]) if err != nil { - return logutil.PrintNLogError(cmd, "command arguments parse error:", err.Error()) + return logger.ConsoleError.PrintNReturnError("Command arguments parse error, cmd=%s, err=%s", cmd, err.Error()) } err = Add(cmdOptions.softwarePath, map[string]string{ @@ -119,7 +124,7 @@ func ScanCommandOptions(options map[string]interface{}) error { case "list": err = cmdOptions.listCmd.Parse(os.Args[3:]) if err != nil { - return logutil.PrintNLogError(cmd, "command arguments parse error:", err.Error()) + return logger.ConsoleError.PrintNReturnError("Command arguments parse error, cmd=%s, err=%s", cmd, err.Error()) } params := map[string]string{ @@ -129,13 +134,14 @@ func ScanCommandOptions(options map[string]interface{}) error { "productVersion": cmdOptions.productVersion, "outputFile": cmdOptions.outputFile, "outputFormat": cmdOptions.outputFormat, + "includeFields": 
cmdOptions.includeFields, } _, err = List(params) case "remove": err = cmdOptions.removeCmd.Parse(os.Args[3:]) if err != nil { - return logutil.PrintNLogError(cmd, "command arguments parse error:", err.Error()) + return logger.ConsoleError.PrintNReturnError("Command arguments parse error, cmd=%s, err=%s", cmd, err.Error()) } err = Remove(cmdOptions.softwareName, cmdOptions.softwareType, cmdOptions.softwareRepo) @@ -144,7 +150,7 @@ func ScanCommandOptions(options map[string]interface{}) error { if len(os.Args) == cmdIndex+2 { subcmd = os.Args[cmdIndex+1] } else if len(os.Args) > cmdIndex+2 { - fmt.Fprintf(os.Stderr, "usage: %s help command\n\nToo many arguments (%d) given.\n", progname, len(os.Args)) + fmt.Fprintf(os.Stderr, "Usage: %s help command\n\nToo many arguments (%d) given.\n", progname, len(os.Args)) os.Exit(2) } usage(progname, subcmd) diff --git a/repo/test_files/default-response.yaml b/repo/test_files/default-response.yaml new file mode 100644 index 0000000..27dff4a --- /dev/null +++ b/repo/test_files/default-response.yaml @@ -0,0 +1,29 @@ +- name: VRTSflex-update + filename: VRTSflex-update-3.2.rpm + description: + - This release is primarily comprised of performance improvements and minor bug + fixes. + - Stop or relocate all running instances before you start the update on each node. + - 'Note: If you choose to roll back after the update, you must stop all instances + on all nodes.' + type: update + url: https://www.veritas.com/support/en_US/doc/130821112-145890001-0 + version: "3.2" + release: "20230314183059" + install: + confirmation-message: + - Make sure you've stopped or relocated the instances before you start the update. + estimated-minutes: 90 + requires-restart: true + supports-precheck: false + supports-rollback: true + rollback: + confirmation-message: + - You must stop the instances on all appliance nodes before you start the rollback. + The rollback requires a restart because the snapshot needs to be reverted. 
+ estimated-minutes: 40 + requires-restart: true + commit: + confirmation-message: + - Once the update is committed, you cannot roll back. + estimated-minutes: 5 \ No newline at end of file diff --git a/repo/test_files/include-checksum-response.yaml b/repo/test_files/include-checksum-response.yaml new file mode 100644 index 0000000..a167208 --- /dev/null +++ b/repo/test_files/include-checksum-response.yaml @@ -0,0 +1,30 @@ +- name: VRTSflex-update + filename: VRTSflex-update-3.2.rpm + description: + - This release is primarily comprised of performance improvements and minor bug + fixes. + - Stop or relocate all running instances before you start the update on each node. + - 'Note: If you choose to roll back after the update, you must stop all instances + on all nodes.' + type: update + url: https://www.veritas.com/support/en_US/doc/130821112-145890001-0 + version: "3.2" + release: "20230314183059" + checksum: random_checksum + install: + confirmation-message: + - Make sure you've stopped or relocated the instances before you start the update. + estimated-minutes: 90 + requires-restart: true + supports-precheck: false + supports-rollback: true + rollback: + confirmation-message: + - You must stop the instances on all appliance nodes before you start the rollback. + The rollback requires a restart because the snapshot needs to be reverted. + estimated-minutes: 40 + requires-restart: true + commit: + confirmation-message: + - Once the update is committed, you cannot roll back. + estimated-minutes: 5 \ No newline at end of file diff --git a/repo/test_files/valid-rpm-metadata.txt b/repo/test_files/valid-rpm-metadata.txt new file mode 100644 index 0000000..3b37a76 --- /dev/null +++ b/repo/test_files/valid-rpm-metadata.txt @@ -0,0 +1,20 @@ +Name : VRTSflex-update +Version : 3.2 +Release : 20230314183059 +Architecture: x86_64 +Install Date: (not installed) +Group : Unspecified +Size : 5009390361 +License : Copyright (c) 2023 Veritas Technologies LLC. All rights reserved. 
+Signature : RSA/SHA256, Wed Mar 15 01:51:36 2023, Key ID cf784714d9712e70 +Source RPM : VRTSflex-update-3.2-20230314183059.src.rpm +Build Date : Wed Mar 15 01:12:33 2023 +Build Host : dlauncher-odpnmgwq-1678842268-appbuildervm161 +Relocations : (not relocatable) +Packager : Appliance Solutions +Vendor : Veritas Technologies LLC +URL : https://www.veritas.com/support/en_US/doc/130821112-145890001-0 +Summary : thunder_cloud 3.2 Update for thunder_cloud 3.2. +Description : +ASUM RPM Format Version : 2 +RPM Info : {"description":["This release is primarily comprised of performance improvements and minor bug fixes.","Stop or relocate all running instances before you start the update on each node.","Note: If you choose to roll back after the update, you must stop all instances on all nodes."],"type":"update","compatibility-info":[{"product-version":"2.1.*","install":{"confirmation-message":["Make sure you've stopped or relocated the instances before you start the update."],"requires-restart":true,"supports-rollback":true,"estimated-minutes":90},"rollback":{"confirmation-message":["You must stop the instances on all appliance nodes before you start the rollback. The rollback requires a restart because the snapshot needs to be reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Once the update is committed, you cannot roll back."],"estimated-minutes":5}},{"product-version":"3.0.*","install":{"confirmation-message":["Make sure you've stopped or relocated the instances before you start the update."],"requires-restart":true,"supports-rollback":true,"estimated-minutes":90},"rollback":{"confirmation-message":["You must stop the instances on all appliance nodes before you start the rollback. 
The rollback requires a restart because the snapshot needs to be reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Once the update is committed, you cannot roll back."],"estimated-minutes":5}},{"product-version":"3.2.*","install":{"confirmation-message":["This is B2B please remove this block before release. Make sure you've stopped or relocated the instances before you start the update."],"requires-restart":true,"supports-rollback":true,"supports-precheck":true,"estimated-minutes":90},"rollback":{"confirmation-message":["You must stop the instances on all appliance nodes before you start the rollback. The rollback requires a restart because the snapshot needs to be reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Once the update is committed, you cannot roll back."],"estimated-minutes":5}},{"product-version":"3.1.*","install":{"confirmation-message":["This is B2B please remove this block before release. Make sure you've stopped or relocated the instances before you start the update."],"requires-restart":true,"supports-rollback":true,"supports-precheck":true,"estimated-minutes":90},"rollback":{"confirmation-message":["You must stop the instances on all appliance nodes before you start the rollback. The rollback requires a restart because the snapshot needs to be reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Once the update is committed, you cannot roll back."],"estimated-minutes":5}}]} \ No newline at end of file diff --git a/update/update.go b/update/update.go index 72bdb0a..b229cdd 100644 --- a/update/update.go +++ b/update/update.go @@ -1,20 +1,20 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 package update import ( "flag" "fmt" + "os" + "os/exec" + "path/filepath" + pm "github.com/VeritasOS/plugin-manager" // import "../plugin-manager" "github.com/VeritasOS/plugin-manager/config" - logutil "github.com/VeritasOS/plugin-manager/utils/log" + logger "github.com/VeritasOS/plugin-manager/utils/log" "github.com/VeritasOS/plugin-manager/utils/output" "github.com/VeritasOS/software-update-manager/repo" "github.com/VeritasOS/software-update-manager/utils/rpm" - "log" - "os" - "os/exec" - "path/filepath" ) // RPMInstallRepoPath is the path where RPM contents are expected to installed/extracted. @@ -33,24 +33,24 @@ type Status struct { // continue onto the next operation. // Ex: Install can receive auto-reboot=true, in which after installation // is completed successfully, `sum` will run reboot operation. - Install []pm.RunStatus `yaml:",omitempty"` - Reboot []pm.RunStatus `yaml:",omitempty"` - Rollback []pm.RunStatus `yaml:",omitempty"` - Commit []pm.RunStatus `yaml:",omitempty"` + Install []pm.Plugin `yaml:",omitempty"` + Reboot []pm.Plugin `yaml:",omitempty"` + Rollback []pm.Plugin `yaml:",omitempty"` + Commit []pm.Plugin `yaml:",omitempty"` Status string StdOutErr string } // Commit runs the commit-precheck and commit plugins of the update workflow. func Commit(result *Status, library string) bool { - log.Println("Entering update::Commit") - defer log.Println("Exiting update::Commit") + logger.Debug.Println("Entering update::Commit") + defer logger.Debug.Println("Exiting update::Commit") // Plugin Types to run for update workflow as part of user commit action. 
pluginTypes := []string{"commit-precheck", "commit"} for _, pt := range pluginTypes { - result.Commit = append(result.Commit, pm.RunStatus{}) + result.Commit = append(result.Commit, pm.Plugin{}) resIdx := len(result.Commit) - 1 err := runPM(&result.Commit[resIdx], pt, library) @@ -58,20 +58,22 @@ func Commit(result *Status, library string) bool { return false } } + result.Status = dStatusOk + output.Write(result) return true } // Install runs the preinstall and install plugins of the update workflow. func Install(result *Status, library string) bool { - log.Println("Entering update::Install") - defer log.Println("Exiting update::Install") + logger.Debug.Println("Entering update::Install") + defer logger.Debug.Println("Exiting update::Install") // Plugin Types to run for update workflow as part of install script. pluginTypes := []string{"preinstall", "install"} var err error for _, pt := range pluginTypes { - result.Install = append(result.Install, pm.RunStatus{}) + result.Install = append(result.Install, pm.Plugin{}) resIdx := len(result.Install) - 1 err = runPM(&result.Install[resIdx], pt, library) @@ -85,27 +87,28 @@ func Install(result *Status, library string) bool { // INFO: Discard rollback errors, and always return false to // indicate installation failure. pt := "rollback" - result.Install = append(result.Install, pm.RunStatus{}) + result.Install = append(result.Install, pm.Plugin{}) resIdx := len(result.Install) - 1 runPM(&result.Install[resIdx], pt, library) return false } result.Status = dStatusOk + output.Write(result) return true } // runCmdFromRPM installs the specified software package from the software repo. 
func runCmdFromRPM(action, swName, swType string, params map[string]string) error { - log.Printf("Entering update::runCmdFromRPM(%s, %s, %s, %+v)", + logger.Debug.Printf("Entering update::runCmdFromRPM(%s, %s, %s, %+v)", action, swName, swType, params) - defer log.Println("Exiting update::runCmdFromRPM") + defer logger.Debug.Println("Exiting update::runCmdFromRPM") if swName == "" { - return logutil.PrintNLogError("Invalid usage. Software name must be specified.") + return logger.ConsoleError.PrintNReturnError("Invalid usage. Software name must be specified.") } if swType == "" { - return logutil.PrintNLogError("Invalid usage. Software type must be specified.") + return logger.ConsoleError.PrintNReturnError("Invalid usage. Software type must be specified.") } swRepo := params["softwareRepo"] if swRepo == "" { @@ -118,9 +121,8 @@ func runCmdFromRPM(action, swName, swType string, params map[string]string) erro fi, err := os.Stat(absSwPath) if err != nil { - log.Printf("Unable to stat on %s: %+v. Error: %s\n", - absSwPath, fi, err.Error()) - return logutil.PrintNLogError("Unable to install %s software %s. "+ + logger.Error.Printf("Unable to stat on %s: %+v, err=%s", absSwPath, fi, err.Error()) + return logger.ConsoleError.PrintNReturnError("Unable to install %s software %s. "+ "Specified software not found.", swType, swName) } @@ -134,17 +136,13 @@ func runCmdFromRPM(action, swName, swType string, params map[string]string) erro params["softwareType"] = swType listInfo, err := repo.List(params) if err != nil { - log.Printf("Failed to repo.List(). Error: %s\n", - err.Error()) + logger.Error.Printf("Failed to repo.List(), err=%s", err.Error()) return err } // Since we had passed softwareName, there is expected to be only one in the list, so get that. 
if 1 != len(listInfo) { - log.Printf("The repo list is expected to have details of %s software, "+ - "but got %+v.", - swName, listInfo) - return logutil.PrintNLogError("Failed to get details of %s software.", - swName) + logger.Error.Printf("The repo list is expected to have details of %s software, but got %+v.", swName, listInfo) + return logger.ConsoleError.PrintNReturnError("Failed to get details of %s software.", swName) } rpmInfo := listInfo[0] @@ -159,7 +157,7 @@ func runCmdFromRPM(action, swName, swType string, params map[string]string) erro err = rpm.Install(absSwPath) if err != nil { - return logutil.PrintNLogError("Failed to install software.") + return logger.ConsoleError.PrintNReturnError("Failed to install software.") } } @@ -167,40 +165,38 @@ func runCmdFromRPM(action, swName, swType string, params map[string]string) erro fmt.Sprintf("%s-%s-%s", rpmInfo.GetRPMName(), rpmInfo.GetRPMVersion(), rpmInfo.GetRPMRelease()) + string(os.PathSeparator) + action - log.Println("Script to be invoked:", script) + logger.Info.Println("Script to be invoked:", script) const cmdStr = "/bin/sh" cmdParams := []string{script, "-output-file", output.GetFile(), "-output-format", output.GetFormat()} cmd := exec.Command(os.ExpandEnv(cmdStr), cmdParams...) stdOutErr, err := cmd.CombinedOutput() - log.Println("Stdout & Stderr:", string(stdOutErr)) + logger.Debug.Println("Stdout & Stderr:", string(stdOutErr)) if err != nil { - log.Printf("Failed to run %s script of %s RPM. 
Error: %s\n", - script, absSwPath, err.Error()) + logger.Error.Printf("Failed to run %s script of %s RPM, err=%s", script, absSwPath, err.Error()) if "install" == action { rpm.Uninstall(rpmInfo.GetRPMName()) } - return logutil.PrintNLogError("Failed to %s software.", action) + return logger.ConsoleError.PrintNReturnError("Failed to %s software.", action) } - log.Printf("Successfully completed %s of %s software", action, absSwPath) - logutil.PrintNLog("Successfully completed %s of %s software %s from repository.\n", + logger.Info.Printf("Successfully completed %s of %s software", action, absSwPath) + logger.ConsoleInfo.Printf("Successfully completed %s of %s software %s from repository.\n", action, swType, swName) return nil } -// Reboot runs the prereboot plugins and reboot the system as part of the -// update workflow. +// Reboot runs the prereboot plugins and reboot the system as part of the update workflow. func Reboot(result *Status, library string) bool { - log.Println("Entering update::Reboot") - defer log.Println("Exiting update::Reboot") + logger.Debug.Println("Entering update::Reboot") + defer logger.Debug.Println("Exiting update::Reboot") // Plugin Types to run for update workflow as part of prereboot script. pluginTypes := []string{"prereboot"} for _, pt := range pluginTypes { - result.Reboot = append(result.Reboot, pm.RunStatus{}) + result.Reboot = append(result.Reboot, pm.Plugin{}) resIdx := len(result.Reboot) - 1 err := runPM(&result.Reboot[resIdx], pt, library) @@ -210,13 +206,15 @@ func Reboot(result *Status, library string) bool { // INFO: Discard rollback errors, and always return false to // indicate reboot failure. pt := "rollback" - result.Reboot = append(result.Reboot, pm.RunStatus{}) + result.Reboot = append(result.Reboot, pm.Plugin{}) resIdx := len(result.Reboot) - 1 runPM(&result.Reboot[resIdx], pt, library) return false } } + result.Status = dStatusOk + output.Write(result) // Reboot the system after prereboot plugins are run successfully. 
cmdStr := "systemctl" @@ -224,44 +222,40 @@ func Reboot(result *Status, library string) bool { cmd := exec.Command(os.ExpandEnv(cmdStr), cmdParams...) stdOutErr, err := cmd.CombinedOutput() - log.Println("Stdout & Stderr:", string(stdOutErr)) + logger.Debug.Println("Stdout & Stderr:", string(stdOutErr)) if err != nil { - log.Printf("Failed to reboot the system. Error: %s\n", err.Error()) - result.StdOutErr = "Failed to reboot the system." - return false + logger.Error.Printf("Failed to reboot the system, err=%s", err.Error()) } return true } -func runPM(result *pm.RunStatus, pluginType, library string) error { - log.Println("Entering update::runPM") - defer log.Println("Exiting update::runPM") +func runPM(result *pm.Plugin, pluginType, library string) error { + logger.Debug.Println("Entering update::runPM") + defer logger.Debug.Println("Exiting update::runPM") - logutil.PrintNLog("Running %s plugins...", pluginType) + logger.ConsoleInfo.Printf("Running %s plugins...", pluginType) config.SetPluginsLibrary(library) - err := pm.Run(result, pluginType) + err := pm.RunFromLibrary(result, pluginType, pm.RunOptions{Library: library}) if err != nil { - log.Printf("Failed to run %s plugins. Error: %s\n", - pluginType, err.Error()) + logger.Error.Printf("Failed to run %s plugins, err=%s", pluginType, err.Error()) return err } fmt.Println() return nil } -// Rollback runs the required rollback plugins of the update workflow in the -// new version/partition. +// Rollback runs the required rollback plugins of the update workflow in the new version/partition. func Rollback(result *Status, library string) bool { - log.Println("Entering update::Rollback") - defer log.Println("Exiting update::Rollback") + logger.Debug.Println("Entering update::Rollback") + defer logger.Debug.Println("Exiting update::Rollback") // Plugin Types to run for update workflow as part of rollback script. 
pluginTypes := []string{"rollback-precheck", "prerollback"} for _, pt := range pluginTypes { - result.Rollback = append(result.Rollback, pm.RunStatus{}) + result.Rollback = append(result.Rollback, pm.Plugin{}) resIdx := len(result.Rollback) - 1 err := runPM(&result.Rollback[resIdx], pt, library) @@ -270,6 +264,7 @@ func Rollback(result *Status, library string) bool { } } result.Status = dStatusOk + output.Write(result) return true } @@ -320,8 +315,8 @@ func registerCmdOptions(f *flag.FlagSet) { // registerCommandCommit registers install command and its options func registerCommandCommit(progname string) { - log.Printf("Entering update::registerCommandCommit(%s)", progname) - defer log.Println("Exiting update::registerCommandCommit") + logger.Debug.Printf("Entering update::registerCommandCommit(%s)", progname) + defer logger.Debug.Println("Exiting update::registerCommandCommit") cmdOptions.commitCmd = flag.NewFlagSet(progname+" commit", flag.PanicOnError) registerCmdOptions(cmdOptions.commitCmd) @@ -329,8 +324,8 @@ func registerCommandCommit(progname string) { // registerCommandInstall registers install command and its options func registerCommandInstall(progname string) { - log.Printf("Entering update::registerCommandInstall(%s)", progname) - defer log.Println("Exiting update::registerCommandInstall") + logger.Debug.Printf("Entering update::registerCommandInstall(%s)", progname) + defer logger.Debug.Println("Exiting update::registerCommandInstall") cmdOptions.installCmd = flag.NewFlagSet(progname+" install", flag.PanicOnError) registerCmdOptions(cmdOptions.installCmd) @@ -338,8 +333,8 @@ func registerCommandInstall(progname string) { // registerCommandReboot registers reboot command and its options func registerCommandReboot(progname string) { - log.Printf("Entering update::registerCommandReboot(%s)", progname) - defer log.Println("Exiting update::registerCommandReboot") + logger.Debug.Printf("Entering update::registerCommandReboot(%s)", progname) + defer 
logger.Debug.Println("Exiting update::registerCommandReboot") cmdOptions.rebootCmd = flag.NewFlagSet(progname+" reboot", flag.PanicOnError) registerCmdOptions(cmdOptions.rebootCmd) @@ -347,8 +342,8 @@ func registerCommandReboot(progname string) { // registerCommandRollback registers rollback command and its options func registerCommandRollback(progname string) { - log.Printf("Entering update::registerCommandRollback(%s)", progname) - defer log.Println("Exiting update::registerCommandRollback") + logger.Debug.Printf("Entering update::registerCommandRollback(%s)", progname) + defer logger.Debug.Println("Exiting update::registerCommandRollback") cmdOptions.rollbackCmd = flag.NewFlagSet(progname+" rollback", flag.PanicOnError) registerCmdOptions(cmdOptions.rollbackCmd) @@ -356,8 +351,8 @@ func registerCommandRollback(progname string) { // RegisterCommandOptions registers the command options that are supported func RegisterCommandOptions(progname string) { - log.Println("Entering update::RegisterCommandOptions") - defer log.Println("Exiting update::RegisterCommandOptions") + logger.Debug.Println("Entering update::RegisterCommandOptions") + defer logger.Debug.Println("Exiting update::RegisterCommandOptions") registerCommandCommit(progname) registerCommandInstall(progname) @@ -368,13 +363,13 @@ func RegisterCommandOptions(progname string) { // ScanCommandOptions scans for the command line options and makes appropriate // function call. // Input: -// 1. map[string]interface{} -// where, the options could be following: -// "progname": Name of the program along with any cmds (ex: sum pm) -// "cmd-index": Index to the cmd (ex: run) +// 1. 
map[string]interface{} +// where, the options could be following: +// "progname": Name of the program along with any cmds (ex: sum pm) +// "cmd-index": Index to the cmd (ex: run) func ScanCommandOptions(options map[string]interface{}) error { - log.Printf("Entering ScanCommandOptions(%+v)...", options) - defer log.Println("Exiting ScanCommandOptions") + logger.Debug.Printf("Entering ScanCommandOptions(%+v)...", options) + defer logger.Debug.Println("Exiting ScanCommandOptions") progname := filepath.Base(os.Args[0]) cmdIndex := 1 @@ -385,7 +380,7 @@ func ScanCommandOptions(options map[string]interface{}) error { cmdIndex = valI.(int) } cmd := os.Args[cmdIndex] - log.Println("progname: ", progname, " cmd with arguments: ", os.Args[cmdIndex:]) + logger.Debug.Println("Progname: ", progname, " cmd with arguments: ", os.Args[cmdIndex:]) var err error switch cmd { @@ -418,7 +413,7 @@ func ScanCommandOptions(options map[string]interface{}) error { } if err != nil { - return logutil.PrintNLogError(cmd, "command arguments parse error:", err.Error()) + return logger.ConsoleError.PrintNReturnError("Command arguments parse error, cmd=%s, err=%s", cmd, err.Error()) } if cmdOptions.softwareName != "" { @@ -440,16 +435,12 @@ func ScanCommandOptions(options map[string]interface{}) error { case "rollback": ret = Rollback(&status, library) } - if ret { - status.Status = dStatusOk - } else { - err = logutil.PrintNLogError("Failed to %s the update.", cmd) + if !ret { + err = logger.ConsoleError.PrintNReturnError("Failed to %s the update.", cmd) status.Status = dStatusFail status.StdOutErr = err.Error() + output.Write(status) } - - output.Write(status) - } return err } diff --git a/utils/rpm/rpm.go b/utils/rpm/rpm.go index f956a84..18be2ce 100644 --- a/utils/rpm/rpm.go +++ b/utils/rpm/rpm.go @@ -1,16 +1,16 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2022 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 -// Package rpm contains utility functions (required by SUM) for managing RPM files. +// Package rpm contains utility functions (required by ASUM) for managing RPM files. package rpm import ( - logutil "github.com/VeritasOS/plugin-manager/utils/log" - "log" "os" "os/exec" "path/filepath" "regexp" "strings" + + logger "github.com/VeritasOS/plugin-manager/utils/log" ) // Cmd is the path of the `rpm` command. @@ -18,32 +18,52 @@ const Cmd = "/usr/bin/rpm" // GetRPMPackageInfo queries and retrieves RPM file info. func GetRPMPackageInfo(rpmPath string) ([]byte, error) { - log.Printf("Entering rpm::GetRPMPackageInfo(%s)", rpmPath) - defer log.Println("Exiting rpm::GetRPMPackageInfo") + logger.Debug.Printf("Entering rpm::GetRPMPackageInfo(%s)", rpmPath) + defer logger.Debug.Println("Exiting rpm::GetRPMPackageInfo") cmdParams := []string{"-q", "-p", "--info", filepath.FromSlash(rpmPath)} cmd := exec.Command(os.ExpandEnv(Cmd), cmdParams...) stdOutErr, err := cmd.CombinedOutput() - log.Println("Stdout & Stderr:", string(stdOutErr)) + logger.Debug.Println("Stdout & Stderr:", string(stdOutErr)) if err != nil { - log.Printf("Failed to get %s RPM details. Error: %s\n", + logger.Error.Printf("Failed to get %s RPM details, err=%s", rpmPath, err.Error()) return stdOutErr, err } return stdOutErr, nil } +// GetRPMPackageName queries and retrieves RPM file name. +func GetRPMPackageName(rpmPath string) (string, error) { + logger.Debug.Printf("Entering rpm::GetRPMPackageName(%s)", rpmPath) + defer logger.Debug.Println("Exiting rpm::GetRPMPackageName") + + cmdParams := []string{"-q", "-p", filepath.FromSlash(rpmPath)} + cmd := exec.Command(os.ExpandEnv(Cmd), cmdParams...) 
+ stdOutErr, err := cmd.CombinedOutput() + logger.Debug.Println("Stdout & Stderr:", string(stdOutErr)) + if err != nil { + logger.Error.Printf("Failed to get %s RPM details, err=%s", + rpmPath, err.Error()) + return string(stdOutErr), err + } + rpmName := string(stdOutErr) + rpmName = strings.TrimSuffix(rpmName, "\n") + return rpmName, nil + +} + // IsInstalled tells whether RPM is installed on the system. func IsInstalled(rpmName string) bool { - log.Printf("Entering rpm::IsInstalled(%s)", rpmName) - defer log.Println("Exiting rpm::IsInstalled") + logger.Debug.Printf("Entering rpm::IsInstalled(%s)", rpmName) + defer logger.Debug.Println("Exiting rpm::IsInstalled") cmdParams := []string{"-q", rpmName} cmd := exec.Command(os.ExpandEnv(Cmd), cmdParams...) stdOutErr, err := cmd.CombinedOutput() - log.Println("Stdout & Stderr:", string(stdOutErr)) + logger.Debug.Println("Stdout & Stderr:", string(stdOutErr)) if err != nil { - log.Printf("Failed to query on %s RPM. Error: %s\n", + logger.Error.Printf("Failed to query on %s RPM, err=%s", rpmName, err.Error()) return false } @@ -52,15 +72,15 @@ func IsInstalled(rpmName string) bool { // Install the specified RPM file. func Install(rpmPath string) error { - log.Printf("Entering rpm::Install(%s)", rpmPath) - defer log.Println("Exiting rpm::Install") + logger.Debug.Printf("Entering rpm::Install(%s)", rpmPath) + defer logger.Debug.Println("Exiting rpm::Install") cmdParams := []string{"-Uvh", filepath.FromSlash(rpmPath)} cmd := exec.Command(os.ExpandEnv(Cmd), cmdParams...) stdOutErr, err := cmd.CombinedOutput() - log.Println("Stdout & Stderr:", string(stdOutErr)) + logger.Debug.Println("Stdout & Stderr:", string(stdOutErr)) if err != nil { - log.Printf("Failed to install %s RPM. Error: %s\n", + logger.Error.Printf("Failed to install %s RPM, err=%s", rpmPath, err.Error()) return err } @@ -68,10 +88,11 @@ func Install(rpmPath string) error { } // ParseMetaData parses the RPM metadata -// into key-value pair. 
+// +// into key-value pair. func ParseMetaData(metaData string) map[string]string { - log.Printf("Entering rpm::ParseMetaData()") - defer log.Println("Exiting rpm::ParseMetaData") + logger.Debug.Printf("Entering rpm::ParseMetaData()") + defer logger.Debug.Println("Exiting rpm::ParseMetaData") parsedData := map[string]string{} key := "" @@ -80,8 +101,7 @@ func ParseMetaData(metaData string) map[string]string { for _, line := range strings.Split(metaData, "\n") { matched, err := regexp.MatchString(pattern, line) if err != nil { - log.Printf("regexp.MatchString(%s, %s); Error: %s", - "[.]rpm", line, err.Error()) + logger.Error.Printf("regexp.MatchString(%s, %s), err=%s", pattern, line, err.Error()) continue } if matched { @@ -100,17 +120,16 @@ func ParseMetaData(metaData string) map[string]string { // Uninstall erases/uninstalls the specified RPM from node. func Uninstall(rpmName string) error { - log.Printf("Entering rpm::Uninstall(%s)", rpmName) - defer log.Println("Exiting rpm::Uninstall") + logger.Debug.Printf("Entering rpm::Uninstall(%s)", rpmName) + defer logger.Debug.Println("Exiting rpm::Uninstall") cmdParams := []string{"-e", rpmName} cmd := exec.Command(os.ExpandEnv(Cmd), cmdParams...) stdOutErr, err := cmd.CombinedOutput() - log.Println("Stdout & Stderr:", string(stdOutErr)) + logger.Debug.Println("Stdout & Stderr:", string(stdOutErr)) if err != nil { - log.Printf("Failed to remove %s RPM. Error: %s\n", - rpmName, err.Error()) - return logutil.PrintNLogError("Failed to uninstall %s software.", rpmName) + logger.Error.Printf("Failed to remove %s RPM, err=%s", rpmName, err.Error()) + return logger.ConsoleError.PrintNReturnError("Failed to uninstall %s software.", rpmName) } return nil } diff --git a/utils/rpm/rpm_test.go b/utils/rpm/rpm_test.go index 1516f4b..65810f1 100644 --- a/utils/rpm/rpm_test.go +++ b/utils/rpm/rpm_test.go @@ -1,12 +1,15 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 +// Copyright (c) 2022 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 // Package repo defines software repository functions like listing, removing -// packages from software repository. +// +// packages from software repository. package rpm import ( "reflect" "testing" + + logger "github.com/VeritasOS/plugin-manager/utils/log" ) func Test_ParseMetaData(t *testing.T) { @@ -34,7 +37,7 @@ Source RPM : platformx-upgrade-3.3.13-20200325132333.src.rpm Build Date : Wed 25 Mar 2020 02:42:20 PM PDT Build Host : k8s-prod-rhel72-10g-7tdz2 Relocations : /system/upgrade/repository/platformx_core -Packager : Veritas Technologies LLC +Packager : Appliance Solutions Vendor : Veritas Technologies LLC URL : https://www.veritas.com/support/en_US.html Summary : Provides platformx-upgrade. @@ -51,7 +54,7 @@ VersionInfo:[{"Version":"0.0.9","Reboot":"Yes","Estimate":{"hours":"0","minutes" "Group": "Unspecified", "License": "Copyright (c) 2019 Veritas Technologies LLC. All rights reserved.", "Name": "platformx-upgrade", - "Packager": "Veritas Technologies LLC", + "Packager": "Appliance Solutions", "Release": "20200325132333", "Relocations": "/system/upgrade/repository/platformx_core", "Signature": "RSA/SHA256, Wed 25 Mar 2020 03:05:55 PM PDT, Key ID cf784714d9712e70", @@ -70,7 +73,7 @@ VersionInfo:[{"Version":"0.0.9","Reboot":"Yes","Estimate":{"hours":"0","minutes" { name: "V2 RPM output", args: args{ - data: `Name : VRTSasum-update + data: `Name : VRTSvxos-asum-update Version : 2.0.1 Release : 20200723001743 Architecture: x86_64 @@ -79,7 +82,7 @@ Group : Unspecified Size : 6098439 License : Copyright (c) 2020 Veritas Technologies LLC. All rights reserved. 
Signature : RSA/SHA256, Mon 06 Jul 2020 02:41:26 PM PDT, Key ID cf784714d9712e70 -Source RPM : VRTSasum-update-2.0.1-20200723001743.src.rpm +Source RPM : VRTSvxos-asum-update-2.0.1-20200723001743.src.rpm Build Date : Wed 22 Jul 2020 05:17:50 PM PDT Build Host : builder Relocations : (not relocatable) @@ -87,33 +90,35 @@ Packager : Veritas AS DevOps URL : https://www.veritas.com/support/en_US.html Summary : Sample Update RPM created using ASUM SDK Description : -RPM Format Version : 2 +ASUM RPM Format Version : 2 RPM Info : {"Description":["Sample multi-line description of the update RPM.","Client scripts/GUI can display each item in this list in a separate para with appropriate linebreaks."],"Type":"Update","VersionInfo":[{"Version":"*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"supports-rollback":false,"estimated-minutes":35},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"estimated-minutes":20},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back.","Also, committing node restarts some services."],"estimated-minutes":5}},{"Version":"3.*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display warning messages and instructions like stopping instances."],"requires-restart":false,"supports-rollback":false,"estimated-minutes":25},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like roll back requires restarting node, ","(even though install didn't require restart), as snapshot needs to be 
reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back."],"estimated-minutes":5}}]} `, }, want: map[string]string{ - "RPM Format Version": "2", - "Architecture": "x86_64", - "Build Date": "Wed 22 Jul 2020 05:17:50 PM PDT", - "Build Host": "builder", - "Description": "", - "Group": "Unspecified", - "Install Date": "(not installed)", - "License": "Copyright (c) 2020 Veritas Technologies LLC. All rights reserved.", - "Name": "VRTSasum-update", - "Packager": "Veritas AS DevOps ", - "RPM Info": `{"Description":["Sample multi-line description of the update RPM.","Client scripts/GUI can display each item in this list in a separate para with appropriate linebreaks."],"Type":"Update","VersionInfo":[{"Version":"*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"supports-rollback":false,"estimated-minutes":35},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"estimated-minutes":20},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back.","Also, committing node restarts some services."],"estimated-minutes":5}},{"Version":"3.*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display warning messages and instructions like stopping instances."],"requires-restart":false,"supports-rollback":false,"estimated-minutes":25},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and 
instructions like roll back requires restarting node, ","(even though install didn't require restart), as snapshot needs to be reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back."],"estimated-minutes":5}}]}`, - "Release": "20200723001743", - "Relocations": "(not relocatable)", - "Signature": "RSA/SHA256, Mon 06 Jul 2020 02:41:26 PM PDT, Key ID cf784714d9712e70", - "Size": "6098439", - "Source RPM": "VRTSasum-update-2.0.1-20200723001743.src.rpm", - "Summary": "Sample Update RPM created using ASUM SDK", - "URL": "https://www.veritas.com/support/en_US.html", - "Version": "2.0.1", + "ASUM RPM Format Version": "2", + "Architecture": "x86_64", + "Build Date": "Wed 22 Jul 2020 05:17:50 PM PDT", + "Build Host": "builder", + "Description": "", + "Group": "Unspecified", + "Install Date": "(not installed)", + "License": "Copyright (c) 2020 Veritas Technologies LLC. 
All rights reserved.", + "Name": "VRTSvxos-asum-update", + "Packager": "Veritas AS DevOps ", + "RPM Info": `{"Description":["Sample multi-line description of the update RPM.","Client scripts/GUI can display each item in this list in a separate para with appropriate linebreaks."],"Type":"Update","VersionInfo":[{"Version":"*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"supports-rollback":false,"estimated-minutes":35},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like restarting node, ","and confirming that users have stopped instances."],"requires-restart":true,"estimated-minutes":20},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back.","Also, committing node restarts some services."],"estimated-minutes":5}},{"Version":"3.*","install":{"confirmation-message":["Sample multi-line confirmation message","","Display warning messages and instructions like stopping instances."],"requires-restart":false,"supports-rollback":false,"estimated-minutes":25},"rollback":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like roll back requires restarting node, ","(even though install didn't require restart), as snapshot needs to be reverted."],"requires-restart":true,"estimated-minutes":40},"commit":{"confirmation-message":["Sample multi-line confirmation message","","Display info messages and instructions like once update is committed, you cannot roll back."],"estimated-minutes":5}}]}`, + "Release": "20200723001743", + "Relocations": "(not relocatable)", + "Signature": "RSA/SHA256, Mon 06 Jul 2020 02:41:26 PM PDT, Key ID cf784714d9712e70", + "Size": "6098439", 
+ "Source RPM": "VRTSvxos-asum-update-2.0.1-20200723001743.src.rpm", + "Summary": "Sample Update RPM created using ASUM SDK", + "URL": "https://www.veritas.com/support/en_US.html", + "Version": "2.0.1", }, }, } + // Set log file name to "test", so that cleaning becomes easier. + logger.InitFileLogger("test.log", "INFO") for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := ParseMetaData(tt.args.data); !reflect.DeepEqual(got, tt.want) { diff --git a/validate/validate.go b/validate/validate.go index 6c28173..31b8a55 100644 --- a/validate/validate.go +++ b/validate/validate.go @@ -1,26 +1,28 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 package validate import ( "flag" "fmt" - logutil "github.com/VeritasOS/plugin-manager/utils/log" - "github.com/VeritasOS/software-update-manager/repo" - "github.com/VeritasOS/software-update-manager/validate/version" - "log" "os" "os/exec" "path/filepath" "strings" + + logger "github.com/VeritasOS/plugin-manager/utils/log" + "github.com/VeritasOS/software-update-manager/repo" + "github.com/VeritasOS/software-update-manager/validate/version" ) var execCommand = exec.Command +const syscfgCmd = "/usr/bin/syscfg/syscfg" + func isRPMCompatibile(productVersion string, rpmFile string) error { - log.Printf("Entering validate::isRPMCompatibile(%s, %s)...", + logger.Debug.Printf("Entering validate::isRPMCompatibile(%s, %s)...", productVersion, rpmFile) - defer log.Println("Exiting validate::isRPMCompatibile") + defer logger.Debug.Println("Exiting validate::isRPMCompatibile") err := fileExists(rpmFile) if err != nil { @@ -34,7 +36,7 @@ func isRPMCompatibile(productVersion string, rpmFile string) error { curInfo := info[0] if !version.Compare(productVersion, curInfo.GetMatchedVersion()) { - return logutil.PrintNLogError("The %s software file is not compatibile for %s version.", + return 
logger.ConsoleError.PrintNReturnError("The %s software file is not compatibile for %s version.", filepath.Base(rpmFile), productVersion) } return nil @@ -52,7 +54,7 @@ func isSigned(rpmFile string) error { rpmSign := strings.TrimSpace(string(out)) if strings.Contains(rpmSign, "(none)") { - return logutil.PrintNLogError("RPM file %s is not signed. Only "+ + return logger.ConsoleError.PrintNReturnError("RPM file %s is not signed. Only "+ "install updates that have been downloaded from or provided by Veritas", rpmFile) } @@ -61,13 +63,30 @@ func isSigned(rpmFile string) error { } func verifySign(rpmFile string) error { - - rpmCmd := execCommand("rpm", "-Kv", rpmFile) + /* + * In RHEL8, RPM installation fails if the RPM is built without digest. + * Set the separation build time as: Dec 05, 2022 00:00:00 GMT (Gordon GA date)(1670198400) + * before that build time, the rpms are built without package or header digests, skip digest check + * after that build time, the rpms are built with digest, will use normal rpm installation. + */ + var rpmCmd *exec.Cmd + cmdString := "/bin/rpm -qp --queryformat '%{BUILDTIME}' '" + rpmFile + "'" + rpmCmd = execCommand("/bin/sh", "-c", cmdString) out, err := rpmCmd.CombinedOutput() - log.Printf(string(out)) + if err != nil { + err = fmt.Errorf("Failed to execute RPM command "+cmdString+". Error %v", err.Error()) + return err + } + if string(out) < "1670198400" { + rpmCmd = execCommand("/bin/rpm", "-Kv", "--nodigest", rpmFile) + } else { + rpmCmd = execCommand("/bin/rpm", "-Kv", rpmFile) + } + out, err = rpmCmd.CombinedOutput() + logger.Debug.Println(string(out)) if err != nil { - err = logutil.PrintNLogError("Signature validation failed for %s. "+ + err = logger.ConsoleError.PrintNReturnError("Signature validation failed for %s. "+ "Only install updates that have been downloaded from or provided by "+ "Veritas. 
Error %v", rpmFile, err.Error()) @@ -102,12 +121,40 @@ func fileExists(filePath string) error { return nil } +func validateBiosPassword(biosPassword string) error { + if _, err := os.Stat("/sys/firmware/efi"); os.IsNotExist(err) { + // Not an EFI system + return nil + } + if _, err := os.Stat(syscfgCmd); os.IsNotExist(err) { + // syscfg command not found + return fmt.Errorf("syscfg command not found") + } + + var validateCmd *exec.Cmd + vCommand := syscfgCmd + " /bcs '" + biosPassword + "' 'Advanced' 'Intel(R) Virtualization Technology' 1" + validateCmd = execCommand("sh", "-c", vCommand) + + _, err := validateCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("bios password is incorrect. Error %v", err.Error()) + } + return nil +} + // Exec executes rpm validation func Exec(args []string) { os.Args = args versionFlag := flag.Bool("version", false, "command for validating rpm version: validate -version -rpm= -product-version= ") signFlag := flag.Bool("signature", false, "command for validating rpm version: validate -signature -rpm= ") + biosPwFlag := flag.Bool( + "biospassword", + false, + `Validate BIOS password. +Set environment variable 'BIOS_PASSWORD' to validate password. +Using env var to avoid password getting logged and also process list (ps) showing password`, + ) rpmFile := flag.String("rpm", "", "path of rpm file") productVersion := flag.String("product-version", "", "product version") @@ -135,7 +182,20 @@ func Exec(args []string) { os.Exit(1) } } - if !*signFlag && !*versionFlag { + + if *biosPwFlag { + // INFO: Using environment variable BIOS_PASSWORD to validate password, so as to avoid + // 1. password getting logged + // 2. 
process list (ps) showing password + passwd := os.Getenv("BIOS_PASSWORD") + err := validateBiosPassword(passwd) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err.Error()) + os.Exit(1) + } + } + + if !*signFlag && !*versionFlag && !*biosPwFlag { flag.PrintDefaults() os.Exit(1) } diff --git a/validate/validate_test.go b/validate/validate_test.go index 96915d2..2330ee6 100644 --- a/validate/validate_test.go +++ b/validate/validate_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2022 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 package validate @@ -7,19 +7,28 @@ import ( "os" "os/exec" "strconv" + "strings" "testing" + + logger "github.com/VeritasOS/plugin-manager/utils/log" ) -var mockedExitStatus = 0 -var mockedStdout string +type cmdRep struct { + cmdOut string + exitCode int +} + +var mockedCmdExec map[string]cmdRep +// Based on different command, return different result func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestExecCommandHelper", "--", command} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) 
- es := strconv.Itoa(mockedExitStatus) + fullCmd := command + " " + strings.Join(args, " ") + es := strconv.Itoa(mockedCmdExec[fullCmd].exitCode) cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1", - "STDOUT=" + mockedStdout, + "STDOUT=" + mockedCmdExec[fullCmd].cmdOut, "EXIT_STATUS=" + es} return cmd } @@ -35,47 +44,94 @@ func TestExecCommandHelper(t *testing.T) { func Test_verifySign(t *testing.T) { type args struct { - rpmFile string - errorMsg string - exitStatus int + rpmFile string // function input + cmdExec map[string]cmdRep // cmd input and response } tests := []struct { name string args args wantErr bool + errMsg string }{ { - name: "Valid signature", + name: "Signature ealier than Gorden GA date, skip digest check", // # 1670198400 + args: args{ + rpmFile: "asum.rpm", + cmdExec: map[string]cmdRep{ + "/bin/sh -c /bin/rpm -qp --queryformat '%{BUILDTIME}' 'asum.rpm'": { + cmdOut: "1670198000", + exitCode: 0, + }, + "/bin/rpm -Kv --nodigest asum.rpm": { + cmdOut: "", + exitCode: 0, + }, + "/bin/rpm -Kv asum.rpm": { + cmdOut: "", + exitCode: 1, + }, + }, + }, + errMsg: "", + wantErr: false, + }, + { + name: "Signature later than Gorden GA date, digest check succeeds", args: args{ - rpmFile: "asum.rpm", - errorMsg: "", - exitStatus: 0, + rpmFile: "asum.rpm", + cmdExec: map[string]cmdRep{ + "/bin/sh -c /bin/rpm -qp --queryformat '%{BUILDTIME}' 'asum.rpm'": { + cmdOut: "1670200000", + exitCode: 0, + }, + "/bin/rpm -Kv --nodigest asum.rpm": { + cmdOut: "", + exitCode: 1, + }, + "/bin/rpm -Kv asum.rpm": { + cmdOut: "", + exitCode: 0, + }, + }, }, + errMsg: "", wantErr: false, }, { - name: "Invalid signature", + name: "Signature later than Gorden GA date, digest check fails", args: args{ rpmFile: "asum.rpm", - errorMsg: "Signature validation failed for asum.rpm. Only " + - "install updates that have been downloaded from or " + - "provided by Veritas. 
Error exit status 1", - exitStatus: 1, + cmdExec: map[string]cmdRep{ + "/bin/sh -c /bin/rpm -qp --queryformat '%{BUILDTIME}' 'asum.rpm'": { + cmdOut: "1670200000", + exitCode: 0, + }, + "/bin/rpm -Kv --nodigest asum.rpm": { + cmdOut: "", + exitCode: 1, + }, + "/bin/rpm -Kv asum.rpm": { + cmdOut: "", + exitCode: 1, + }, + }, }, + errMsg: "Signature validation failed for asum.rpm. Only install updates that have been downloaded from or provided by Veritas. Error exit status 1", wantErr: true, }, } + // Set log file name to "test", so that cleaning becomes easier. + logger.InitFileLogger("test.log", "INFO") for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mockedExitStatus = tt.args.exitStatus - mockedStdout = "" + mockedCmdExec = tt.args.cmdExec execCommand = fakeExecCommand defer func() { execCommand = exec.Command }() err := verifySign(tt.args.rpmFile) - if (err != nil) && (tt.wantErr) && (err.Error() != tt.args.errorMsg) { - t.Errorf("expected: <%v>, but got <%v>", tt.args.errorMsg, err.Error()) + if (err != nil) && (tt.wantErr) && (err.Error() != tt.errMsg) { + t.Errorf("expected: <%v>, but got <%v>", tt.errMsg, err.Error()) } }) } @@ -83,68 +139,70 @@ func Test_verifySign(t *testing.T) { func Test_isSigned(t *testing.T) { type args struct { - rpmFile string - errorMsg string - exitStatus int - stdout string + rpmFile string // function input + cmdExec map[string]cmdRep // cmd input and response } tests := []struct { name string args args wantErr bool + errMsg string }{ { name: "RPM is signed", args: args{ - rpmFile: "asum.rpm", - errorMsg: "", - exitStatus: 0, - stdout: "RSA/SHA1, Wed 06 Feb 2019 06:50:08 PM PST, Key ID cf784714d9712e70", - }, - wantErr: false, - }, - { - name: "RPM is signed", - args: args{ - rpmFile: "asum (1).rpm", - errorMsg: "", - exitStatus: 0, - stdout: "RSA/SHA1, Wed 06 Feb 2019 06:50:08 PM PST, Key ID cf784714d9712e70", + rpmFile: "asum.rpm", + cmdExec: map[string]cmdRep{ + "sh -c rpm -qip 'asum.rpm' | grep Signature | cut 
-d ':' -f 2-": { + cmdOut: "RSA/SHA1, Wed 06 Feb 2019 06:50:08 PM PST, Key ID cf784714d9712e70", + exitCode: 0, + }, + }, }, + errMsg: "", wantErr: false, }, { name: "RPM command throws error", args: args{ - rpmFile: "asum.rpm", - errorMsg: "Failed to execute RPM command rpm -qip 'asum.rpm' | grep Signature | cut -d ':' -f 2-. Error exit status 1", - exitStatus: 1, - stdout: "", + rpmFile: "asum.rpm", + cmdExec: map[string]cmdRep{ + "sh -c rpm -qip 'asum.rpm' | grep Signature | cut -d ':' -f 2-": { + cmdOut: "", + exitCode: 1, + }, + }, }, + errMsg: "Failed to execute RPM command rpm -qip 'asum.rpm' | grep Signature | cut -d ':' -f 2-. Error exit status 1", wantErr: true, }, { name: "RPM file is not signed", args: args{ - rpmFile: "asum.rpm", - errorMsg: "RPM file asum.rpm is not signed. Only install updates that have been downloaded from or provided by Veritas", - exitStatus: 0, - stdout: "(none)", + rpmFile: "asum.rpm", + cmdExec: map[string]cmdRep{ + "sh -c rpm -qip 'asum.rpm' | grep Signature | cut -d ':' -f 2-": { + cmdOut: "(none)", + exitCode: 0, + }, + }, }, + errMsg: "RPM file asum.rpm is not signed. Only install updates that have been downloaded from or provided by Veritas", wantErr: true, }, } + // Set log file name to "test", so that cleaning becomes easier. 
+ logger.InitFileLogger("test.log", "INFO") for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mockedExitStatus = tt.args.exitStatus - mockedStdout = tt.args.stdout + mockedCmdExec = tt.args.cmdExec execCommand = fakeExecCommand defer func() { execCommand = exec.Command }() err := isSigned(tt.args.rpmFile) - if (err != nil) && (tt.wantErr) && (err.Error() != tt.args.errorMsg) { - t.Errorf("expected: <%v>, but got <%v>", tt.args.errorMsg, err.Error()) + if (err != nil) && (tt.wantErr) && (err.Error() != tt.errMsg) { + t.Errorf("expected: <%v>, but got <%v>", tt.errMsg, err.Error()) } }) } diff --git a/validate/version/version.go b/validate/version/version.go index 2ce8919..639c6ec 100644 --- a/validate/version/version.go +++ b/validate/version/version.go @@ -1,19 +1,21 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 +// Copyright (c) 2022 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 package version import ( "encoding/json" "fmt" - "log" "strings" + + logger "github.com/VeritasOS/plugin-manager/utils/log" ) // V1VersionInfo would be decoded from the V1VersionInfo JSON array. type V1VersionInfo struct { - Version string `json:"Version"` - Reboot string `json:"Reboot"` - Estimate struct { + Description []string `json:"Description"` + Version string `json:"Version"` + Reboot string `json:"Reboot"` + Estimate struct { Hours string `json:"hours"` Minutes string `json:"minutes"` Seconds string `json:"seconds"` @@ -21,10 +23,11 @@ type V1VersionInfo struct { } // Compare checks whether the specified version (with '*' patterns) -// matches the given product version. +// +// matches the given product version. 
func Compare(productVersion string, version string) bool { - log.Printf("Entering version::Compare(%s, %s)", productVersion, version) - defer log.Println("Exiting version::Compare") + logger.Debug.Printf("Entering version::Compare(%s, %s)", productVersion, version) + defer logger.Debug.Println("Exiting version::Compare") var num1, num2 string productVersionNums := strings.Split(productVersion, ".") @@ -54,15 +57,15 @@ func Compare(productVersion string, version string) bool { } func validateJSONFormat(versionInfoString string) ([]V1VersionInfo, error) { - log.Printf("Entering version::validateJSONFormat(%s)...", + logger.Debug.Printf("Entering version::validateJSONFormat(%s)...", versionInfoString) - defer log.Printf("Exiting version::validateJSONFormat...") + defer logger.Debug.Printf("Exiting version::validateJSONFormat...") versionInfoArray := make([]V1VersionInfo, 0) err := json.Unmarshal([]byte(versionInfoString), &versionInfoArray) if err != nil { - log.Printf("json.Unmarshal(%s, %v); Error: %s", + logger.Error.Printf("Failed to call json.Unmarshal(%s, %v), err=%s", versionInfoString, &versionInfoArray, err.Error()) err = fmt.Errorf("RPM V1VersionInfo is not in valid JSON format: %v", err) return versionInfoArray, err @@ -71,19 +74,17 @@ func validateJSONFormat(versionInfoString string) ([]V1VersionInfo, error) { } func validateVersion(productVersion string, versionInfoArray []V1VersionInfo) (V1VersionInfo, error) { - log.Printf("Entering version::validateVersion(%s, %s)...", + logger.Debug.Printf("Entering version::validateVersion(%s, %s)...", productVersion, versionInfoArray) - defer log.Println("Exiting version::validateVersion") + defer logger.Debug.Println("Exiting version::validateVersion") versionSet := map[string]bool{} info := V1VersionInfo{} for _, versionInfo := range versionInfoArray { if versionSet[versionInfo.Version] { - err := fmt.Errorf("Update version is not compatible for "+ - "the product version %v.", productVersion) - log.Printf("Error 
in ValidateVersion: %s", - err) + err := fmt.Errorf("updating version is not compatible for the product version %v", productVersion) + logger.Error.Printf("Validating version failed, err=%v", err) return info, err } versionSet[versionInfo.Version] = true @@ -97,12 +98,10 @@ func validateVersion(productVersion string, versionInfoArray []V1VersionInfo) (V info = versionInfo } } - - if (info == V1VersionInfo{}) { - err := fmt.Errorf("Update version is not "+ - "compatible for the product version %s.", productVersion) - log.Printf("Error in ValidateVersion: %s", - err) + if (info.Version == V1VersionInfo{}.Version) { + err := fmt.Errorf("update version is not "+ + "compatible for the product version %s", productVersion) + logger.Error.Printf("Validating version failed, err=%v", err) return V1VersionInfo{}, err } @@ -111,11 +110,12 @@ func validateVersion(productVersion string, versionInfoArray []V1VersionInfo) (V } // GetCompatibileVersionInfo checks whether the product version is in the -// compatibility list, and returns version info for that product version. +// +// compatibility list, and returns version info for that product version. func GetCompatibileVersionInfo(productVersion, versionInfoString string) (V1VersionInfo, error) { - log.Printf("Entering version::GetCompatibileVersionInfo(%s, %s)...", + logger.Debug.Printf("Entering version::GetCompatibileVersionInfo(%s, %s)...", productVersion, versionInfoString) - defer log.Printf("Exiting version::GetCompatibileVersionInfo...") + defer logger.Debug.Printf("Exiting version::GetCompatibileVersionInfo...") info := V1VersionInfo{} versionInfoArray, err := validateJSONFormat(versionInfoString) diff --git a/validate/version/version_test.go b/validate/version/version_test.go index 5a916ce..af3c1f2 100644 --- a/validate/version/version_test.go +++ b/validate/version/version_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 +// Copyright (c) 2022 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 package version @@ -6,6 +6,8 @@ import ( "encoding/json" "reflect" "testing" + + logger "github.com/VeritasOS/plugin-manager/utils/log" ) func TestCompare(t *testing.T) { @@ -64,6 +66,8 @@ func TestCompare(t *testing.T) { want: true, }, } + // Set log file name to "test", so that cleaning becomes easier. + logger.InitFileLogger("test.log", "INFO") for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := Compare(tt.args.productVersion, tt.args.version); got != tt.want { @@ -138,7 +142,8 @@ func Test_validateVersion(t *testing.T) { wantErr: true, }, } - + // Set log file name to "test", so that cleaning becomes easier. + logger.InitFileLogger("test.log", "INFO") for _, test := range tests { t.Run(test.name, func(t *testing.T) { versionInfo := make([]V1VersionInfo, 0) diff --git a/vendor/github.com/VeritasOS/plugin-manager/.gitignore b/vendor/github.com/VeritasOS/plugin-manager/.gitignore new file mode 100644 index 0000000..66fd13c --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/VeritasOS/plugin-manager/LICENSE b/vendor/github.com/VeritasOS/plugin-manager/LICENSE new file mode 100644 index 0000000..f9c4c98 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Veritas Technologies LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to 
use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/VeritasOS/plugin-manager/Makefile b/vendor/github.com/VeritasOS/plugin-manager/Makefile new file mode 100644 index 0000000..1247b23 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/Makefile @@ -0,0 +1,124 @@ +# Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html +.DEFAULT_GOAL := help +.PHONY: help +help: ## Display this help message. + @grep -Eh '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' + +TOP=$(CURDIR) +include $(TOP)/Makefile.conf + +# Go build related variables +GOSRC=$(TOP) + +# Set GOBIN to where binaries get picked up while creating RPM/ISO. 
+GOBIN?=$(TOP)/bin +GOCOVERDIR=$(GOSRC)/cover +GOTOOLSBIN=$(TOP)/tools/go/ + +.SILENT: + +.PHONY: all +all: clean analyze build test + +.PHONY: clean +clean: ## Clean Plugin Manager go build & test artifacts + @echo "Cleaning Plugin Manager Go binaries..."; + export GOBIN=$(GOBIN); \ + cd $(GOSRC); \ + go clean -i -mod=vendor ./...; + @echo "Cleaning Go test artifacts... "; + -@find $(GOSRC) -name "*.dot" -o -name "*.html" -o -name "*.log" -o -name "*.svg" -o -name "*.xml" | xargs -i rm -f {} + -@rm -rf $(GOSRC)/plugins/ + -@rm -rf $(GOCOVERDIR); + +.PHONY: build +build: ## Build source code + # Since go build determines and build only updated sources, no need to run clean all go binaries + @echo "Building Plugin Manager Go binaries..."; + export GOBIN=$(GOBIN); \ + cd $(GOSRC); \ + go install -ldflags "-X main.buildDate=`date -u +%Y%m%d.%H%M%S`" -mod=vendor -v ./...; \ + ret=$$?; \ + if [ $${ret} -ne 0 ]; then \ + @echo "Failed to build Plugin Manager Go binaries."; \ + exit 1; \ + fi + + +.PHONY: analyze +analyze: gofmt golint govet go-race ## Analyze source code for different errors through gofmt, golint, govet, go-race + +.PHONY: golint +golint: ## Run golint + @echo Checking Plugin Manager Go code for lint errors... + $(GOTOOLSBIN)/golint -set_exit_status `cd $(GOSRC); go list -mod=vendor -f '{{.Dir}}' ./...` + +.PHONY: gofmt +gofmt: ## Run gofmt + @echo Checking Go code for format errors... + fmterrs=`gofmt -l . | grep -v vendor/ 2>&1`; \ + if [ "$$fmterrs" ]; then \ + echo "gofmt must be run on the following files:"; \ + echo "$$fmterrs"; \ + exit 1; \ + fi + +.PHONY: govet +govet: ## Run go vet + @echo Vetting Plugin Manager Go code for errors... + cd $(GOSRC); \ + go vet -mod=vendor -all ./... 
+ +.PHONY: test +test: ## Run all tests + echo "Running Plugin Manager Go Unit Tests..."; + mkdir -p $(GOCOVERDIR); + export GOCOVERDIR=$(GOCOVERDIR); \ + export INTEG_TEST_BIN=$(GOSRC); \ + export PM_CONF_FILE=$(GOSRC)/sample/pm.config.yaml; \ + export INTEGRATION_TEST=START; \ + cd $(GOSRC); \ + test_failed=0; \ + d=pm; \ + go test -mod=vendor -v --cover -covermode=count -coverprofile=$(GOCOVERDIR)/$${d}.out ./... | \ + $(GOTOOLSBIN)/go-junit-report > TEST-$${d}.xml; \ + ret=$${PIPESTATUS[0]}; \ + if [ $${ret} -ne 0 ]; then \ + echo "Go unit test failed for $${d}."; \ + test_failed=1; \ + fi ; \ + awk -f $(TOP)/tools/gocoverage-collate.awk $(GOCOVERDIR)/* > $(GOCOVERDIR)/cover.out; \ + go tool cover -html=$(GOCOVERDIR)/cover.out -o go-coverage-$${d}.html; \ + $(GOTOOLSBIN)/gocov convert $(GOCOVERDIR)/cover.out | $(GOTOOLSBIN)/gocov-xml > go-coverage-$${d}.xml; \ + rm -rf $(GOCOVERDIR)/*; \ + export INTEGRATION_TEST=DONE; \ + if [ $${test_failed} -ne 0 ]; then \ + echo "Go unit tests failed."; \ + exit 1; \ + fi + +.PHONY: go-race +go-race: ## Run Go tests with race detector enabled + echo "Checking Go code for race conditions..."; + # NOTE: COVER directory should be present, along with INTEGRATION_TEST + # value being set to "START" for integ_test.go to succeed. 
+ mkdir -p $(GOCOVERDIR); + export GOCOVERDIR=$(GOCOVERDIR); \ + export INTEGRATION_TEST=START; \ + export INTEG_TEST_BIN=$(GOSRC); \ + cd $(GOSRC); \ + export PM_CONF_FILE=$(GOSRC)/sample/pm.config.yaml; \ + go test -mod=vendor -v -race ./...; + +.PHONY: update-go-tools +update-go-tools: ## Update Go thirdparty tools to current go version + export GOBIN=$(GOTOOLSBIN); \ + go install github.com/axw/gocov/gocov@latest; \ + go install github.com/AlekSi/gocov-xml@latest; \ + go install github.com/matm/gocov-html/cmd/gocov-html@latest; \ + go install github.com/jstemmer/go-junit-report/v2@latest; \ + go install golang.org/x/lint/golint@latest + +.NOTPARALLEL: diff --git a/vendor/github.com/VeritasOS/plugin-manager/Makefile.conf b/vendor/github.com/VeritasOS/plugin-manager/Makefile.conf new file mode 100644 index 0000000..5b3dd7d --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/Makefile.conf @@ -0,0 +1,3 @@ +# Copyright (c) 2023 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +SHELL = /bin/bash diff --git a/vendor/github.com/VeritasOS/plugin-manager/README.md b/vendor/github.com/VeritasOS/plugin-manager/README.md new file mode 100644 index 0000000..c748482 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/README.md @@ -0,0 +1,447 @@ +# Plugin Manager (PM) + +[![Go](https://github.com/VeritasOS/plugin-manager/actions/workflows/go.yml/badge.svg?branch=v1)](https://github.com/VeritasOS/plugin-manager/actions/workflows/go.yml) + +Plugin Manager (PM) provides a way for components to define actions and +validations via plugins. PM provides dependency management similar to Red Hat +systemd. 
+
+**Table of Contents**
+
+- [Plugin Manager (PM)](#plugin-manager-pm)
+  - [Plugins](#plugins)
+  - [Plugin Types and File Extensions](#plugin-types-and-file-extensions)
+  - [Plugin Dependencies](#plugin-dependencies)
+  - [Viewing Plugin and its dependencies](#viewing-plugin-and-its-dependencies)
+  - [Example: Plugin Manager (PM) `list`](#example-plugin-manager-pm-list)
+  - [Configuring Plugin Manager](#configuring-plugin-manager)
+  - [Running Plugins](#running-plugins)
+  - [Example: Plugin Manager (PM) `run -plugins`](#example-plugin-manager-pm-run--plugins)
+  - [Specify `-plugins` details as a json string](#specify--plugins-details-as-a-json-string)
+  - [Specify `-plugins` details via json file](#specify--plugins-details-via-json-file)
+  - [Example: Plugin Manager (PM) `run -type`](#example-plugin-manager-pm-run--type)
+  - [Example: Plugin Manager (PM) with `sequential` flag](#example-plugin-manager-pm-with-sequential-flag)
+  - [Example: Overriding Plugin Manager (PM) configuration - `library`, `log-dir` and `log-file`](#example-overriding-plugin-manager-pm-configuration---library-log-dir-and-log-file)
+  - [Example: Writing plugins result to a `output-file` in `output-format` {json, yaml} format](#example-writing-plugins-result-to-a-output-file-in-output-format-json-yaml-format)
+
+## Plugins
+
+Plugin Manager basically uses configuration files that are known as *Plugins* to inform
+about what it does, its dependencies, and the action to be performed.
+
+- **`Description`**: about the action i.e., what the plugin does.
+- **`ExecStart`**: refers to the action to be performed i.e., the binary to be executed.
+  - **Note**: `ExecStart` should have the absolute path to the binary, so that PM
+    knows where the binary is located. In case binaries are located in the
+    same plugin directory, then you could specify the path using the PM's plugins
+    library path that would be updated in the environment variable
+    i.e., `PM_LIBRARY`.
+  - **Example**: `ExecStart=/bin/sh ${PM_LIBRARY}/<component>/example.sh`.
+- **`RequiredBy`**: informs that the current plugin must be run before the
+  specified plugins.
+  In other words, the specified plugins must be run after the current plugin.
+- **`Requires`**: informs that the current plugin must be run after the
+  specified plugins.
+  In other words, the specified plugins must be run before the current plugin.
+
+All plugins must be installed (extracted) into the
+`${PM_LIBRARY}/<component>` folder. If you would like to customize
+this path, you could either
+
+1. Create/update the config file to look for a different plugins library path,
+   and set the `PM_CONF_FILE` to the config file.
+2. Specify the plugins library path i.e., `-library` while calling PM.
+
+The `PM_LIBRARY` would be set to the `library` location specified in the PM configuration file. The configuration file currently has `library` as `/system/upgrade/repository/plugins/`, but could change in the future. And hence one must use the environment variable `${PM_LIBRARY}` to access the plugins library path.
+
+## Plugin Types and File Extensions
+
+The type of a plugin is identified basically based on the plugin file's extension, and it's up to the consumer of the plugin manager to define the plugin types for their actions.
+
+**Example**: To perform pre-upgrade tasks by various services/components/features in the upgrade workflow, one could define a `.preupgrade` plugin type, and have all these plugins called through the plugin manager by specifying `-type` as `preupgrade`.
+
+## Plugin Dependencies
+
+Plugin Manager allows specifying dependencies between plugins.
+Basically it allows a plugin to be run before or after certain plugins are run.
+The below sample shows that the "d" plugin requires the “b” and “c” plugins to be run
+first before it runs, and “A” must be run after "D".
+
+```bash
+$ cat /A/a.prereboot
+Description=Applying “A” settings
+ExecStart=<path to binary that applies “A” settings>
+```
+
+```bash
+$ cat /D/d.prereboot
+Description=Applying “D” settings
+RequiredBy=A/a.prereboot
+Requires=B/b.prereboot C/c.prereboot
+ExecStart=${PM_LIBRARY}/D/example.sh
+```
+
+### Viewing Plugin and its dependencies
+
+The plugins and their dependencies can be viewed visually in an SVG image by running the `list` command of Plugin Manager.
+
+The PM list command syntax / usage is as shown below:
+
+```bash
+pm list -type <plugin type>
+    [-library=<plugin library path>]
+    [-log-tag=<log tag>]
+    [-log-dir=<log directory>]
+    [-log-file=<log file name>]
+```
+
+where
+
+- **`type`**: Indicates the plugin type.
+- **`library`**: Indicates the location of plugins library.
+  **Overrides** value present in PM configuration.
+- **`log-tag`**: Indicates the log tag written by rsyslog.
+  Note: rsyslog is used as the default logger for both main and plugin logs.
+  It will be overridden if the `log-file` option is set.
+- **`log-dir`**: Indicates the log directory path.
+  **Overrides** value present in PM configuration.
+- **`log-file`**: Indicates the name of the log file.
+  **Overrides** value present in PM configuration.
+
+#### Example: Plugin Manager (PM) `list`
+
+```bash
+$ $GOBIN/pm list -type=preupgrade
+Log: pm.2020-01-13T15:56:46.6817949-08:00.log
+The list of plugins are mapped in .//preupgrade.2020-01-13T15:56:46.725348-08:00.svg
+```
+
+## Configuring Plugin Manager
+
+Plugin Manager can be configured to look for plugins at a specific location,
+and to write logs to a specific file by specifying those details in the
+Plugin Manager configuration file `/opt/veritas/appliance/asum/pm.config.yml`.
+
+Instead of updating the default config file, one can choose to provide his/her
+own custom config file.
+This can be done by setting the environment variable `PM_CONF_FILE` to the
+custom config file path as shown below.
+
+```bash
+export PM_CONF_FILE=sample/pm.config.yaml
+```
+
+The config file could be either a `yaml` or a `json` file.
+Below is a sample `yaml` configuration file.
+
+```bash
+$ cat pm.config.yaml
+---
+PluginManager:
+  # `library` is the location where plugin directories containing plugins are expected to be present
+  library: "./sample/library"
+  log dir: "./"
+  # `log file` indicates the name of the log file.
+  # The timestamp and '.log' extension would be appended to this name.
+  # I.e., the format of the log file generated would be: "<log file>.<timestamp>.log"
+  # Example: The below value results in the following log file: pm.2020-01-13T16:11:58.6006565-08:00.log
+  log file: "pm.log"
+...
+```
+
+## Running Plugins
+
+Plugin Manager tries to execute all available plugins of a certain type as
+mentioned by the `-type` argument. The result of the execution of a plugin
+(i.e., the result of executing the binary specified in `ExecStart`) is
+marked as `Succeeded` or `Failed` to mean success or failure respectively.
+The PM checks the exit status of the binary to infer success or failure.
+If the binary exits with 0, then plugin execution is marked as `Succeeded`,
+while any non-zero exit value is considered as `Failed`. In case of a non-zero
+exit value of plugins, the PM exits with 1.
+
+The PM run command syntax / usage is as shown below:
+
+```bash
+pm run [-plugins <plugins json string or json file path>]
+    [-type <plugin type>]
+    [-library=<plugin library path>]
+    [-sequential[={true|1|false|0}]]
+    [-log-tag=<log tag>]
+    [-log-dir=<log directory>]
+    [-log-file=<log file name>]
+    [-output={json|yaml}]
+    [-output-file=<output file name>]
+```
+
+where
+
+- **`plugins`**: A json string or a json file containing plugins and their dependencies.
+- **`type`**: Indicates the plugin type.
+- **`library`**: Indicates the location of plugins library.
+  **Overrides** value present in PM configuration.
+  **NOTE** The specified value gets set as an environment variable `PM_LIBRARY` for the plugins being run. The plugin file can access any scripts in the same folder via the `PM_LIBRARY` variable.
+- **`-sequential`**: Indicates PM to execute only one plugin at a time
+  regardless of how many plugins' dependencies are met.
+ **Default: Disabled**. To enable, specify `-sequential=true` or just + `-sequential` while running PM. +- **`log-tag`**: Indicates the log tag written by rsyslog. The `log-tag` option will supercede `log-dir` and `log-file` options. +- **`log-dir`**: Indicates the log directory path. + **Overrides** value present in PM configuration. +- **`log-file`**: Indicates the name of the log file. + **Overrides** value present in PM configuration. +- **`output`**: Indicates the format to write the plugins run results. + Supported formats: "json", "yaml". +- **`output-file`**: Indicates the name of the output file. + **Note** Specified in conjunction with `output`. + If `output` format is specified, and `output-file` is not specified, + then result will be displayed on console. + +### Example: Plugin Manager (PM) `run -plugins` + +```json +$ jq -n "$plugins" | tee sample/plugins-prereboot.json +{ + "Plugins": [ + { + "Name": "A/a.prereboot", + "Description": "Applying \"A\" settings", + "ExecStart": "/usr/bin/ls -l -t", + "Requires": [ + "C/c.prereboot", + "D/d.prereboot" + ] + }, + { + "Name": "B/b.prereboot", + "Description": "Applying \"B\" settings...", + "ExecStart": "/bin/echo \"Running B...\"", + "RequiredBy": [ + "D/d.prereboot" + ] + }, + { + "Name": "C/c.prereboot", + "Description": "Applying \"C\" settings...", + "ExecStart": "/bin/echo \"Running C...\"", + "RequiredBy": [ + "A/a.prereboot" + ] + }, + { + "Name": "D/d.prereboot", + "Description": "Applying \"D\" settings...", + "ExecStart": "/bin/echo 'Running D...!'", + "RequiredBy": [ + "A/a.prereboot" + ], + "Requires": [ + "B/b.prereboot" + ] + } + ] +} +$ +``` + +#### Specify `-plugins` details as a json string + +```bash +$ $GOBIN/pm run -plugins "$plugins" +Applying "B" settings...: Starting +Applying "C" settings...: Starting +Applying "B" settings...: Succeeded +Applying "D" settings...: Starting +Applying "C" settings...: Succeeded +Applying "D" settings...: Succeeded +Applying "A" settings: Starting 
+Applying "A" settings: Succeeded +Running plugins: Succeeded +bash-5.1$ +``` + +#### Specify `-plugins` details via json file + +```bash +$ $GOBIN/pm run -plugins "./sample/plugins-prereboot.json" -library sample/library/ +Applying "C" settings...: Starting +Applying "B" settings...: Starting +Applying "C" settings...: Succeeded +Applying "B" settings...: Succeeded +Applying "D" settings...: Starting +Applying "D" settings...: Succeeded +Applying "A" settings: Starting +Applying "A" settings: Succeeded +Running plugins: Succeeded +$ +``` + +### Example: Plugin Manager (PM) `run -type` + +```bash +$ $GOBIN/pm run -type=prereboot +Log: pm.2019-07-12T15:23:07.3494206-07:00.log + +Applying "B" settings: Starting + +Applying "C" settings: Starting +Applying "C" settings: Succeeded +Applying "B" settings: Succeeded + +Applying "D" settings: Starting +Applying "D" settings: Succeeded + +Applying "A" settings: Starting +Applying "A" settings: Succeeded +Running prereboot plugins: Succeeded +$ +``` + +### Example: Plugin Manager (PM) with `sequential` flag + +The `sequential` option informs Plugin Manager to execute one plugin at a time. +By default, this is disabled, and multiple plugins whose dependencies are met +would be run in parallel. 
+ +```bash +$ $GOBIN/pm run -type=prereboot -sequential +Log: pm.2019-07-12T15:36:33.7415514-07:00.log + +Applying "B" settings: Starting +Applying "B" settings: Succeeded + +Applying "C" settings: Starting +Applying "C" settings: Succeeded + +Applying "D" settings: Starting +Applying "D" settings: Succeeded + +Applying "A" settings: Starting +Applying "A" settings: Succeeded +Running prereboot plugins: Succeeded +$ +``` + +### Example: Overriding Plugin Manager (PM) configuration - `library`, `log-dir` and `log-file` + +To override the values in the PM configuration, specify one or many of the +following optional arguments: `library`, `log-dir` and `log-file` + +```bash +$ $GOBIN/pm run -type postreboot -library=sample/library/ -log-dir=testlogs/ -log-file=test.log +Log: pm.2019-07-12T15:39:08.1145946-07:00.log +Log: testlogs/test.2019-07-12T15:39:08.1209416-07:00.log + +Validating "A's" configuration: Starting +Validating "A's" configuration: Succeeded +Running postreboot plugins: Succeeded +$ ls testlogs/ +test.2019-07-12T15:39:08.1209416-07:00.log +$ +``` + +### Example: Writing plugins result to a `output-file` in `output-format` {json, yaml} format + +```bash +$ $GOBIN/pm run -type preupgrade -output-format=json -output-file=a.json -library ./sample/library/ +Log: /var/log/asum/pm.2021-01-29T17:46:57.6904918-08:00.log + +Checking for "D" settings...: Starting +Checking for "D" settings...: Succeeded + +Checking for "A" settings: Starting +Checking for "A" settings: Succeeded +Running preupgrade plugins: Succeeded +$ +``` + +```json +$ cat a.json +{ + "Type": "preupgrade", + "Plugins": [ + { + "Description": "Checking for \"D\" settings...", + "Name": "D/d.preupgrade", + "ExecStart": "$PM_LIBRARY/D/preupgrade.sh", + "RequiredBy": [ + "A/a.preupgrade" + ], + "Requires": null, + "Status": "Succeeded", + "StdOutErr": "Running preupgrade.sh (path: sample/library//D/preupgrade.sh) with status(0)...\nDisplaying Plugin Manager (PM) Config file path: \nDone(0)!\n" + 
}, + { + "Description": "Checking for \"A\" settings", + "Name": "A/a.preupgrade", + "ExecStart": "/bin/echo \"Checking A...\"", + "RequiredBy": null, + "Requires": [ + "D/d.preupgrade" + ], + "Status": "Succeeded", + "StdOutErr": "\"Checking A...\"\n" + } + ], + "Status": "Succeeded", + "StdOutErr": "" +}$ +``` + +```bash +$ $GOBIN/pm run -type preupgrade -output-format=yaml -output-file=a.yaml -library ./sample/library/ +Log: /var/log/asum/pm.2021-01-29T17:53:15.8128937-08:00.log + +Checking for "D" settings...: Starting +Checking for "D" settings...: Failed +Running preupgrade plugins: Failed +$ +``` + +```yaml +# cat a.yaml +name: preupgrade +description: "" +requiredby: [] +requires: [] +execstart: "" +plugins: + - name: A/a.preupgrade + description: Checking for "A" settings + requiredby: [] + requires: + - D/d.preupgrade + execstart: /bin/echo "Checking A..." + plugins: [] + library: "" + runtime: + starttime: 2024-10-28T18:21:17.289968946-05:00 + endtime: 2024-10-28T18:21:17.337773824-05:00 + duration: 47.804888ms + status: Skipped + stdouterr: [] + - name: D/d.preupgrade + description: Checking for "D" settings... + requiredby: [] + requires: [] + execstart: $PM_LIBRARY/D/preupgrade.sh + plugins: [] + library: "" + runtime: + starttime: 2024-10-28T18:21:17.220368224-05:00 + endtime: 2024-10-28T18:21:17.289945583-05:00 + duration: 69.577293ms + status: Failed + stdouterr: + - 'Running preupgrade.sh (path: sample/library//D/preupgrade.sh) with status(1)...' 
+ - 'Displaying Plugin Manager (PM) Config file path: ' + - Fail(1) +library: "" +runtime: + starttime: 2024-10-28T18:21:17.185365352-05:00 + endtime: 2024-10-28T18:21:17.337805574-05:00 + duration: 152.440222ms +status: Failed +stdouterr: + - 'Running preupgrade plugins: Failed' +``` diff --git a/vendor/github.com/VeritasOS/plugin-manager/config/config.go b/vendor/github.com/VeritasOS/plugin-manager/config/config.go new file mode 100644 index 0000000..80fd202 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/config/config.go @@ -0,0 +1,182 @@ +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +package config + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + logger "github.com/VeritasOS/plugin-manager/utils/log" + "gopkg.in/yaml.v3" +) + +// Config is Plugin Manager's configuration information. +type Config struct { + // PluginManager configuration information. + PluginManager struct { + // Library is the path where plugin directories containing plugin files are present. + Library string `yaml:"library"` + LogDir string `yaml:"log dir"` + LogFile string `yaml:"log file"` + LogLevel string `yaml:"log level"` + } +} + +var myConfig Config + +var ( + // EnvConfFile is environment variable containing the config file path. + EnvConfFile string + // DefaultConfigPath is default path for config file used when EnvConfFile is not set. + DefaultConfigPath string +) + +// GetLogDir provides location for storing logs. +func GetLogDir() string { + // TODO: Move log parameters one level up in config as it's common to asum, + // and not specific to PM. + return filepath.FromSlash(filepath.Clean(myConfig.PluginManager.LogDir) + + string(os.PathSeparator)) +} + +// GetLogFile provides name of logfile. +func GetLogFile() string { + // TODO: Move log parameters one level up in config as it's common to asum, + // and not specific to PM. 
+ return myConfig.PluginManager.LogFile +} + +// GetLogLevel provides name of loglevel. +func GetLogLevel() string { + return myConfig.PluginManager.LogLevel +} + +// GetPluginsLibrary gets location of plugins library. +func GetPluginsLibrary() string { + return filepath.FromSlash(filepath.Clean(myConfig.PluginManager.Library) + + string(os.PathSeparator)) +} + +// GetPMLogDir provides location for storing Plugin Manager logs. +// +// NOTE: The plugin logs would be stored "plugins" directory under the +// same path, and use GetPluginsLogDir() to get that path. +func GetPMLogDir() string { + return filepath.FromSlash(filepath.Clean(myConfig.PluginManager.LogDir) + + string(os.PathSeparator)) +} + +// GetPMLogFile gets the file for storing Plugin Manager logs. +func GetPMLogFile() string { + return myConfig.PluginManager.LogFile +} + +// GetPluginsLogDir provides location for storing individual plugins execution logs. +func GetPluginsLogDir() string { + return GetPMLogDir() + "plugins" + string(os.PathSeparator) +} + +// Load config information +func Load() error { + logger.Debug.Println("Entering config.Load()") + defer logger.Debug.Println("Exiting config.Load()") + + myConfigFile := os.Getenv(EnvConfFile) + if myConfigFile == "" { + logger.Info.Printf("%s env is not set. Using default config file.", EnvConfFile) + myConfigFile = DefaultConfigPath + } + myConfigFile = filepath.FromSlash(myConfigFile) + logger.Debug.Printf("config file: %s", myConfigFile) + var err error + myConfig, err = readConfigFile(myConfigFile) + // Set default values when it's not specified in config file. 
+ if myConfig.PluginManager.LogDir == "" { + myConfig.PluginManager.LogDir = logger.DefaultLogDir + } + if myConfig.PluginManager.LogFile == "" { + myConfig.PluginManager.LogFile = logger.DefaultLogFile + } + if myConfig.PluginManager.LogLevel == "" { + myConfig.PluginManager.LogLevel = logger.DefaultLogLevel + } + logger.Debug.Printf("Plugin Manager Config: %+v", myConfig) + return err +} + +func readConfigFile(confFilePath string) (Config, error) { + logger.Debug.Printf("Entering readConfigFile(%s)", confFilePath) + defer logger.Debug.Println("Exiting readConfigFile") + + var conf Config + bFileContents, err := ioutil.ReadFile(confFilePath) + if err != nil { + return conf, logger.ConsoleError.PrintNReturnError("Failed to read \"" + + confFilePath + "\" file.") + } + + err = yaml.Unmarshal(bFileContents, &conf) + if err != nil { + logger.Error.Printf("Failed to call yaml.Unmarshal(%s, %s); err=%s", + bFileContents, &conf, err.Error()) + return conf, logger.ConsoleError.PrintNReturnError("Failed to parse %s config file.", confFilePath) + } + + logger.Debug.Printf("Config: %+v", conf) + return conf, nil +} + +// SetLogDir sets the location for storing Plugin Manager logs. +// +// Use GetPMLogDir() to obtain this location from config. +// NOTE: The plugin logs would be stored "plugins" directory under the +// same path, and use GetPluginsLogDir() to get that path. +func SetLogDir(logDir string) { + // TODO: Move log parameters one level up in config as it's common to asum, + // and not specific to PM. + myConfig.PluginManager.LogDir = filepath.FromSlash( + filepath.Clean(logDir) + string(os.PathSeparator)) +} + +// SetLogFile sets the log file to use. +// +// Use GetLogDir() to obtain this location from config. +// NOTE: The plugin logs would be stored "plugins" directory under the +// same path, and use Get/SetPluginsLogDir() to get/set that path. 
+func SetLogFile(logFile string) { + // TODO: Move log parameters one level up in config as it's common to asum, + // and not specific to PM. + myConfig.PluginManager.LogFile = logFile +} + +// SetLogLevel sets the log level. +func SetLogLevel(logLevel string) { + myConfig.PluginManager.LogLevel = logLevel +} + +// SetPluginsLibrary sets the plugins library location. +func SetPluginsLibrary(library string) { + myConfig.PluginManager.Library = filepath.FromSlash( + filepath.Clean(library) + string(os.PathSeparator)) +} + +// SetPMLogFile sets the file for storing Plugin Manager logs. +func SetPMLogFile(logfile string) { + // add .log suffix if it doesn't exist. + if !strings.HasSuffix(logfile, ".log") { + logfile += ".log" + } + myConfig.PluginManager.LogFile = logfile +} + +// SetPMLogDir sets the location for storing Plugin Manager logs. +// +// Use GetPMLogDir() to obtain this location from config. +// NOTE: The plugin logs would be stored "plugins" directory under the +// same path, and use GetPluginsLogDir() to get that path. 
+func SetPMLogDir(logDir string) { + myConfig.PluginManager.LogDir = filepath.FromSlash( + filepath.Clean(logDir) + string(os.PathSeparator)) +} diff --git a/vendor/github.com/VeritasOS/plugin-manager/go.mod b/vendor/github.com/VeritasOS/plugin-manager/go.mod new file mode 100644 index 0000000..fa38b87 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/go.mod @@ -0,0 +1,5 @@ +module github.com/VeritasOS/plugin-manager + +go 1.19 + +require gopkg.in/yaml.v3 v3.0.1 diff --git a/vendor/github.com/VeritasOS/plugin-manager/go.sum b/vendor/github.com/VeritasOS/plugin-manager/go.sum new file mode 100644 index 0000000..a62c313 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/go.sum @@ -0,0 +1,4 @@ +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/VeritasOS/plugin-manager/graph/graph.go b/vendor/github.com/VeritasOS/plugin-manager/graph/graph.go new file mode 100644 index 0000000..ca4e1dc --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/graph/graph.go @@ -0,0 +1,194 @@ +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +// Package graph is used for generating the graph image. +package graph + +import ( + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/VeritasOS/plugin-manager/config" + "github.com/VeritasOS/plugin-manager/types" + "github.com/VeritasOS/plugin-manager/types/status" + logger "github.com/VeritasOS/plugin-manager/utils/log" + osutils "github.com/VeritasOS/plugin-manager/utils/os" +) + +// graph of plugin and its dependencies. 
+type graph struct { + // fileNoExt is the name of the graph artifacts without extension. + // Extensions could be added to generate input `.dot` file or output + // `.svg` images. + fileNoExt string + // subgraph contains subgraph name (i.e., cluster name) and its contents. + // I.e., each subgraph name is the key, and their contents would be in + // an array. + subgraph sync.Map +} + +var g graph +var dotCmdPresent = true + +// Plugin is of type types.Plugin +type Plugin = types.Plugin + +// Plugins is of type types.Plugins +type Plugins = types.Plugins + +// InitGraphConfig initliazes output file names. +func InitGraphConfig(imgNamePrefix string) { + // Initialization should be done only once. + if g.fileNoExt == "" { + // Remove imgNamePrefix if it's end with ".log" + imgNamePrefix = strings.TrimSuffix(imgNamePrefix, ".log") + g.fileNoExt = imgNamePrefix + "." + time.Now().Format(time.RFC3339Nano) + } +} + +// GetImagePath gets the path of the image file. +func GetImagePath() string { + return config.GetPMLogDir() + g.fileNoExt + ".svg" +} + +// GetDotFilePath gets the path of the dot file. +func GetDotFilePath() string { + return config.GetPMLogDir() + g.fileNoExt + ".dot" +} + +// InitGraph initliazes the graph data structure and invokes generateGraph. 
+func InitGraph(pluginType string, pluginsInfo Plugins) error { + InitGraphConfig(config.GetPMLogFile()) + + // DOT guide: https://graphviz.gitlab.io/_pages/pdf/dotguide.pdf + + for pIdx, p := range pluginsInfo { + pFileString := "\"" + p.Name + "\"" + absLogPath, _ := filepath.Abs(config.GetPMLogDir()) + absLibraryPath, _ := filepath.Abs(config.GetPluginsLibrary()) + relPath, _ := filepath.Rel(absLogPath, absLibraryPath) + pURL := "\"" + filepath.FromSlash(relPath+string(os.PathSeparator)+p.Name) + "\"" + rows := []string{} + rowsInterface, ok := g.subgraph.Load(pluginType) + if ok { + rows = rowsInterface.([]string) + } + rows = append(rows, pFileString+" [label=\""+ + strings.Replace(pluginsInfo[pIdx].Description, "\"", `\"`, -1)+ + "\",style=filled,fillcolor=lightgrey,URL="+pURL+"]") + rows = append(rows, "\""+p.Name+"\"") + rbyLen := len(pluginsInfo[pIdx].RequiredBy) + if rbyLen != 0 { + graphRow := "\"" + p.Name + "\" -> " + for rby := range pluginsInfo[pIdx].RequiredBy { + graphRow += "\"" + pluginsInfo[pIdx].RequiredBy[rby] + "\"" + if rby != rbyLen-1 { + graphRow += ", " + } + } + rows = append(rows, graphRow) + } + rsLen := len(pluginsInfo[pIdx].Requires) + if rsLen != 0 { + graphRow := "" + for rs := range pluginsInfo[pIdx].Requires { + graphRow += "\"" + pluginsInfo[pIdx].Requires[rs] + "\"" + if rs != rsLen-1 { + graphRow += ", " + } + } + graphRow += " -> \"" + p.Name + "\"" + rows = append(rows, graphRow) + } + g.subgraph.Store(pluginType, rows) + } + + return generateGraph() +} + +// generateGraph generates an input `.dot` file based on the fileNoExt name, +// and then generates an `.svg` image output file as fileNoExt.svg. 
+func generateGraph() error { + dotFile := GetDotFilePath() + svgFile := GetImagePath() + + fhDigraph, openerr := osutils.OsOpenFile(dotFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if openerr != nil { + abspath, _ := filepath.Abs(dotFile) + logger.Error.Printf("OsOpenFile(%s) Abs path: %v, err=%s", dotFile, abspath, openerr.Error()) + return openerr + } + defer fhDigraph.Close() + clusterCnt := 0 + graphContent := "digraph {\n" + g.subgraph.Range(func(name interface{}, rows interface{}) bool { + graphContent += "\nsubgraph cluster_" + strconv.Itoa(clusterCnt) + " {\n" + + "label=\"" + name.(string) + " plugins\"\nlabelloc=t\nfontsize=24\n" + + "node [shape=polygon,sides=6,style=filled,fillcolor=red]\n" + + strings.Join(rows.([]string), "\n") + "\n}\n" + clusterCnt++ + return true + }) + graphContent += "\n}\n" + + _, writeerr := fhDigraph.WriteString(graphContent) + if writeerr != nil { + logger.Error.Printf("fhDigraph.WriteString(%s), err=%s", graphContent, writeerr.Error()) + return writeerr + } + + // https://graphviz.gitlab.io/_pages/doc/info/command.html + cmdStr := "dot" + // If cmdStr is not installed on system, then just return. + if !dotCmdPresent { + return nil + } + cmdParams := []string{"-Tsvg", dotFile, "-o", svgFile} + + cmd := osutils.ExecCommand(os.ExpandEnv(cmdStr), cmdParams...) + stdOutErr, err := cmd.CombinedOutput() + if err != nil { + if strings.Contains(err.Error(), "executable file not found in $PATH") { + dotCmdPresent = false + return nil + } + logger.Error.Printf("osutils.ExecCommand(%v, %v), err=%s", cmd, cmdParams, err.Error()) + } + if len(stdOutErr) != 0 { + logger.Debug.Println("Stdout & Stderr:", string(stdOutErr)) + } + + return err +} + +// getStatusColor returns the color for a given result status. 
+func getStatusColor(myStatus string) string { + // Node color + ncolor := "blue" // status.Start by default + if myStatus == status.Fail { + ncolor = "red" + } else if myStatus == status.Ok { + ncolor = "green" + } else if myStatus == status.Skip { + ncolor = "yellow" + } + return ncolor +} + +// UpdateGraph updates the plugin node with the status and url. +func UpdateGraph(subgraphName, plugin, status, url string) error { + ncolor := getStatusColor(status) + gContents := []string{} + gContentsInterface, ok := g.subgraph.Load(subgraphName) + if ok { + gContents = gContentsInterface.([]string) + } + gContents = append(gContents, + "\""+plugin+"\" [style=filled,fillcolor="+ncolor+",URL=\""+url+"\"]") + g.subgraph.Store(subgraphName, gContents) + + return generateGraph() +} diff --git a/vendor/github.com/VeritasOS/plugin-manager/plugin.go b/vendor/github.com/VeritasOS/plugin-manager/plugin.go new file mode 100644 index 0000000..27df116 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/plugin.go @@ -0,0 +1,1035 @@ +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +// Package pm defines Plugin Manager (PM) functions like executing +// all plugins of a particular plugin type. +package pm + +import ( + "bufio" + "encoding/json" + "errors" + "flag" + "fmt" + "io/ioutil" + "log" + "log/syslog" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/VeritasOS/plugin-manager/config" + "github.com/VeritasOS/plugin-manager/graph" + "github.com/VeritasOS/plugin-manager/types" + "github.com/VeritasOS/plugin-manager/types/status" + logger "github.com/VeritasOS/plugin-manager/utils/log" + osutils "github.com/VeritasOS/plugin-manager/utils/os" + "github.com/VeritasOS/plugin-manager/utils/output" + "gopkg.in/yaml.v3" +) + +var ( + // Version of the Plugin Manager (PM). 
+ version = "5.0" +) + +// Plugin is of type types.Plugin +type Plugin = types.Plugin + +// Plugins is of type types.Plugins +type Plugins = types.Plugins + +// getPluginFiles retrieves the plugin files under each component matching +// the specified pluginType. +func getPluginFiles(pluginType, library string) ([]string, error) { + logger.Debug.Println("Entering getPluginFiles") + defer logger.Debug.Println("Exiting getPluginFiles") + + var pluginFiles []string + if _, err := os.Stat(library); os.IsNotExist(err) { + return pluginFiles, logger.ConsoleError.PrintNReturnError("Library '%s' doesn't exist. "+ + "A valid plugins library path must be specified.", library) + } + var files []string + dirs, err := ioutil.ReadDir(library) + if err != nil { + logger.Error.Printf("Failed to call ioutil.ReadDir(%s), err=%s", library, err.Error()) + return pluginFiles, logger.ConsoleError.PrintNReturnError("Failed to get contents of %s plugins library.", library) + } + + for _, dir := range dirs { + compPluginDir := filepath.FromSlash(library + "/" + dir.Name()) + fi, err := os.Stat(compPluginDir) + if err != nil { + logger.Error.Printf("Unable to stat on %s directory, err=%s", dir, err.Error()) + continue + } + if !fi.IsDir() { + logger.Error.Printf("%s is not a directory.", compPluginDir) + continue + } + + tfiles, err := ioutil.ReadDir(compPluginDir) + if err != nil { + logger.Error.Printf("Unable to read contents of %s directory, err=%s", compPluginDir, err.Error()) + } + for _, tf := range tfiles { + files = append(files, filepath.FromSlash(dir.Name()+"/"+tf.Name())) + } + } + + for _, file := range files { + matched, err := regexp.MatchString("[.]"+pluginType+"$", file) + if err != nil { + logger.Error.Printf("Failed to call regexp.MatchString(%s, %s), err=%s", "[.]"+pluginType, file, err.Error()) + continue + } + if matched == true { + pluginFiles = append(pluginFiles, file) + } + } + return pluginFiles, nil +} + +// getPluginType returns the plugin type of the specified 
plugin file. +func getPluginType(file string) string { + return strings.Replace(path.Ext(file), ".", ``, -1) +} + +func getPluginsInfoFromJSONStrOrFile(strOrFile string) (Plugin, error) { + var err error + var pluginsInfo Plugin + rawData := strOrFile + jsonFormat := true + + // If Plugins information is in file... + fi, err := os.Stat(strOrFile) + if err != nil { + logger.Debug.Printf("Specified input is not a file. Err: %s", + err.Error()) + } else { + if fi.IsDir() { + return pluginsInfo, + logger.ConsoleError.PrintNReturnError( + "Specified path %s is directory. Plugins info should be specified either as a json string or in a json file.", + strOrFile) + } + + pluginsFile := strOrFile + fh, err := os.Open(pluginsFile) + if err != nil { + logger.ConsoleError.PrintNReturnError("%s", err) + return pluginsInfo, err + } + defer fh.Close() + + rawData, err = readFile(filepath.FromSlash(pluginsFile)) + if err != nil { + return pluginsInfo, + logger.ConsoleError.PrintNReturnError(err.Error()) + } + + logger.Debug.Printf("Plugins file %v has ext %v", pluginsFile, path.Ext(pluginsFile)) + if path.Ext(pluginsFile) == ".yaml" || path.Ext(pluginsFile) == ".yml" { + jsonFormat = false + } + } + // INFO: Use Plugin to unmarshal to keep input consistent with current + // output json, so that rerun failed could be done using result json. + var pluginsData Plugin + if jsonFormat { + err = json.Unmarshal([]byte(rawData), &pluginsData) + } else { + err = yaml.Unmarshal([]byte(rawData), &pluginsData) + } + if err != nil { + logger.Error.Printf("Failed to call Unmarshal(%s, %v); err=%#v", + rawData, &pluginsInfo, err) + return pluginsInfo, + logger.ConsoleError.PrintNReturnError( + "Plugins is not in expected format. 
Error: %s", err.Error()) + } + return pluginsData, nil +} + +func getPluginsInfoFromLibrary(pluginType, library string) (Plugins, error) { + var pluginsInfo Plugins + pluginFiles, err := getPluginFiles(pluginType, library) + if err != nil { + return pluginsInfo, err + } + for file := range pluginFiles { + fContents, rerr := readFile(filepath.FromSlash( + library + pluginFiles[file])) + if rerr != nil { + return pluginsInfo, logger.ConsoleError.PrintNReturnError(rerr.Error()) + } + logger.Debug.Printf("Plugin file %s contents: \n%s\n", + pluginFiles[file], fContents) + pInfo, perr := parseUnitFile(fContents) + if perr != nil { + return pluginsInfo, perr + } + logger.Info.Printf("Plugin %s info: %+v", pluginFiles[file], pInfo) + pInfo.Name = pluginFiles[file] + pluginsInfo = append(pluginsInfo, &pInfo) + } + return pluginsInfo, nil +} + +func normalizePluginsInfo(pluginsInfo Plugins) Plugins { + logger.Debug.Printf("Entering normalizePluginsInfo(%+v)...", pluginsInfo) + defer logger.Debug.Println("Exiting normalizePluginsInfo") + + nPInfo := make(Plugins, len(pluginsInfo)) + pluginIndexes := make(map[string]int, len(pluginsInfo)) + for pIdx, pInfo := range pluginsInfo { + pluginIndexes[pInfo.Name] = pIdx + nPInfo[pIdx] = &Plugin{ + Name: pInfo.Name, + Description: pInfo.Description, + ExecStart: pInfo.ExecStart, + Plugins: pInfo.Plugins, + Library: pInfo.Library, + } + nPInfo[pIdx].RequiredBy = append(nPInfo[pIdx].Requires, pInfo.RequiredBy...) + nPInfo[pIdx].Requires = append(nPInfo[pIdx].Requires, pInfo.Requires...) + logger.Debug.Printf("%s plugin dependencies: %v", nPInfo[pIdx].Name, nPInfo[pIdx]) + } + for pIdx, pInfo := range nPInfo { + p := pInfo.Name + logger.Debug.Printf("nPInfo key(%v): %v", p, nPInfo[pIdx]) + for _, rs := range nPInfo[pIdx].Requires { + // Check whether it's already marked as RequiredBy dependency in `Requires` plugin. 
+ // logger.Info.Printf("Check whether `in` (%s) already marked as RequiredBy dependency in `Requires`(%s) plugin: %v", + // p, rs, nPInfo[rs]) + present := false + // If dependencies are missing, then pluginIndexes[rs] value will not be defined. + if rsIdx, ok := pluginIndexes[rs]; ok { + logger.Debug.Printf("PluginInfo for %s is present: %v", rs, nPInfo[rsIdx]) + for _, rby := range nPInfo[rsIdx].RequiredBy { + logger.Debug.Printf("p(%s) == rby(%s)? %v", p, rby, p == rby) + if p == rby { + present = true + break + } + } + if !present { + nPInfo[rsIdx].RequiredBy = append(nPInfo[rsIdx].RequiredBy, p) + logger.Info.Printf("Added %s as RequiredBy dependency of %s: %+v", p, rs, nPInfo[rsIdx]) + } + } + } + + // Check whether RequiredBy dependencies are also marked as Requires dependency on other plugin. + logger.Info.Println("Check whether RequiredBy dependencies are also marked as Requires dependency on other plugin.") + for _, rby := range nPInfo[pIdx].RequiredBy { + rbyIdx := pluginIndexes[rby] + logger.Debug.Printf("RequiredBy of %s: %s", p, rby) + logger.Debug.Printf("nPInfo of %s: %+v", rby, nPInfo[rbyIdx]) + // INFO: If one plugin type is added as dependent on another by + // any chance, then skip checking its contents as the other + // plugin type files were not parsed. + if _, ok := pluginIndexes[rby]; !ok { + // NOTE: Add the missing plugin in Requires, So that the issue + // gets caught during validation. + nPInfo[pIdx].Requires = append(nPInfo[pIdx].Requires, rby) + continue + } + present := false + for _, rs := range nPInfo[rbyIdx].Requires { + if p == rs { + present = true + break + } + } + if !present { + nPInfo[rbyIdx].Requires = append(nPInfo[rbyIdx].Requires, p) + logger.Debug.Printf("Added %s as Requires dependency of %s: %+v", p, rby, nPInfo[rbyIdx]) + } + } + } + logger.Debug.Printf("Plugins info after normalizing: \n%+v\n", nPInfo) + return nPInfo +} + +// parseUnitFile parses the plugin file contents. 
+func parseUnitFile(fileContents string) (Plugin, error) { + logger.Debug.Println("Entering parseUnitFile") + defer logger.Debug.Println("Exiting parseUnitFile") + + pluginInfo := Plugin{} + if len(fileContents) == 0 { + return pluginInfo, nil + } + lines := strings.Split(fileContents, "\n") + for l := range lines { + line := strings.TrimSpace(lines[l]) + logger.Debug.Println("line...", line) + line = strings.TrimSpace(line) + if len(line) == 0 { + continue + } + if strings.HasPrefix(line, "#") { + // No need to parse comments. + logger.Debug.Println("Skipping comment line...", line) + continue + } + + fields := strings.Split(line, "=") + if len(fields) == 0 { + continue + } + key := strings.TrimSpace(fields[0]) + val := strings.TrimSpace(strings.Join(fields[1:], "=")) + switch key { + case "Description": + pluginInfo.Description = val + break + case "ExecStart": + pluginInfo.ExecStart = val + break + case "RequiredBy": + pluginInfo.RequiredBy = strings.Split(val, " ") + break + case "Requires": + pluginInfo.Requires = strings.Split(val, " ") + break + default: + logger.Debug.Printf("Non-standard line found: %s", line) + break + } + } + + return pluginInfo, nil +} + +func validateDependencies(nPInfo Plugins) ([]string, error) { + logger.Debug.Println("Entering validateDependencies") + defer logger.Debug.Println("Exiting validateDependencies") + + var pluginOrder []string + notPlacedPlugins := []string{} + dependencyMet := map[string]bool{} + + pluginIndexes := make(map[string]int) + for pIdx, pInfo := range nPInfo { + pluginIndexes[pInfo.Name] = pIdx + } + + for pNameIndex := range nPInfo { + pName := nPInfo[pNameIndex].Name + pContents := nPInfo[pNameIndex] + logger.Debug.Printf("\nPlugin: %s \n%+v \n\n", pName, pContents) + if len(pContents.Requires) == 0 { + dependencyMet[pName] = true + pluginOrder = append(pluginOrder, pName) + } else { + dependencyMet[pName] = false + notPlacedPlugins = append(notPlacedPlugins, pName) + } + } + + curLen := 
len(notPlacedPlugins) + // elementsLeft to process in the notPlacedPlugins queue! + elementsLeft := curLen + prevLen := curLen + // INFO: + // When all the elements are processed in the queue + // (i.e., `elementsLeft` becomes 0), check whether at least one of the + // plugin's dependency has been met (i.e., prevLen != curLen). If not, + // then there is a circular dependency, or plugins are missing dependencies. + for curLen != 0 { + pName := notPlacedPlugins[0] + notPlacedPlugins = notPlacedPlugins[1:] + pIdx := pluginIndexes[pName] + pDependencies := nPInfo[pIdx].Requires + logger.Info.Printf("Plugin %s dependencies: %+v", pName, pDependencies) + + dependencyMet[pName] = true + for w := range pDependencies { + val := dependencyMet[pDependencies[w]] + if false == val { + // If dependency met is false, then process it later again after all dependencies are met. + dependencyMet[pName] = false + logger.Warning.Printf("Adding %s back to list %s to process as %s plugin dependency is not met.", + pName, notPlacedPlugins, pDependencies[w]) + notPlacedPlugins = append(notPlacedPlugins, pName) + break + } + } + // If dependency met is not set to false, then it means all + // dependencies are met. So, add it to pluginOrder + if false != dependencyMet[pName] { + logger.Info.Printf("Dependency met for %s: %v.", pName, dependencyMet[pName]) + pluginOrder = append(pluginOrder, pName) + } + + elementsLeft-- + if elementsLeft == 0 { + logger.Debug.Printf("PrevLen: %d; CurLen: %d.", prevLen, curLen) + curLen = len(notPlacedPlugins) + if prevLen == curLen { + // INFO: Clear out the pluginOrder as we cannot run all the + // plugins either due to missing dependencies or having + // circular dependency. 
+			return []string{}, logger.ConsoleError.PrintNReturnError(
+				"There is either a circular dependency between plugins, "+
+					"or some dependencies are missing in these plugins: %+v",
+				notPlacedPlugins)
+		}
+		prevLen = curLen
+		elementsLeft = curLen
+	}
+
+	return pluginOrder, nil
+}
+
+func executePluginCmd(statusCh chan<- map[string]*Plugin, pInfo Plugin, failedDependency bool, env map[string]string) {
+	p := pInfo.Name
+	logger.Debug.Printf("Channel: Plugin %s info: \n%+v", p, pInfo)
+	graph.UpdateGraph(getPluginType(p), p, status.Start, "")
+	logger.ConsoleInfo.Printf("%s: %s", pInfo.Description, status.Start)
+	pluginLogFile := ""
+	var chLog *log.Logger
+	if !logger.IsFileLogger() {
+		var logTag string
+		// Set log tag for syslog messages emitted while running this plugin.
+		logTag = logger.SyslogTagPrefix + "pm-" + logger.GetLogTag()
+		logger.Debug.Printf("logTag = %s", logTag)
+		syslogHandle, err := syslog.New(syslog.LOG_LOCAL0|syslog.LOG_INFO, logTag)
+		if err != nil {
+			logger.Error.Printf("Failed to call syslog.New, err=%s", err.Error())
+		}
+		defer syslogHandle.Close()
+		chLog = log.New(syslogHandle, "", 0)
+	} else {
+		// Get relative path to plugins log file from PM log dir, so that linking
+		// in plugin graph works even when the logs are copied to another system.
+		pluginLogFile = strings.Replace(config.GetPluginsLogDir(), config.GetPMLogDir(), "", -1) +
+			strings.Replace(p, string(os.PathSeparator), ":", -1) +
+			"." + time.Now().Format(time.RFC3339Nano) + ".log"
+		logFile := config.GetPMLogDir() + pluginLogFile
+		fh, openerr := os.OpenFile(logFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
+		if openerr != nil {
+			logger.Error.Printf("Failed to call os.OpenFile(%s), err=%s", logFile, openerr.Error())
+			// Ignore error and continue as plugin log file creation is not fatal.
+ } + defer fh.Close() + // chLog is a channel logger + chLog = log.New(fh, "", log.LstdFlags) + chLog.SetOutput(fh) + } + + chLog.Println("INFO: Plugin file:", p) + + // If already marked as failed/skipped due to dependency fail, + // then just return that status. + myStatus := "" + myStatusMsg := "" + if failedDependency { + myStatusMsg = "Skipping as its dependency failed." + myStatus = status.Skip + } else if len(pInfo.Plugins) != 0 { + execStatus := executePlugins(&pInfo.Plugins, false, env) + if !execStatus { + myStatus = status.Fail + graph.UpdateGraph(getPluginType(p), p, myStatus, "") + err := fmt.Errorf("Running %s plugins: %s", p, myStatus) + statusCh <- map[string]*Plugin{p: {Status: myStatus, StdOutErr: []string{err.Error()}}} + return + } + myStatus = status.Ok + } else if pInfo.ExecStart == "" { + myStatusMsg = "Passing as ExecStart value is empty!" + myStatus = status.Ok + } + + if myStatus != "" { + chLog.Println("INFO: ", myStatusMsg) + logger.Info.Printf("Plugin(%s): %s", p, myStatusMsg) + graph.UpdateGraph(getPluginType(p), p, myStatus, "") + logger.ConsoleInfo.Printf("%s: %s", pInfo.Description, myStatus) + statusCh <- map[string]*Plugin{p: {Status: myStatus}} + return + } + + // INFO: First initialize with existing OS env, and then overwrite any + // existing keys with user specified values. I.e., Even if PM_LIBRARY + // env is set in shell, it'll be overwritten by Library parameter passed + // by user. 
+ envList := osutils.OsEnviron() + envMap := osutils.EnvMap() + for envKey, envValue := range env { + envList = append(envList, envKey+"="+envValue) + envMap[envKey] = envValue + } + + getEnvVal := func(name string) string { + // logger.Debug.Printf("In getEnvVal(%v)...", name) + // logger.Debug.Printf("env: %+v", envMap) + if val, ok := envMap[name]; ok { + // logger.Debug.Printf("Key:%v = Value:%v", name, val) + return val + } + return "" + } + + logger.Info.Printf("Executing command, cmd=%s", pInfo.ExecStart) + // INFO: Expand environment values like "PM_LIBRARY" so that ... + // 1. Binaries or scripts placed in the same directory as that of plugins + // can be accessed as ${PM_LIBRARY}/ path. + // 2. Envs that are set by caller of calling plugin manager gets expanded. + cmdParam := strings.Split(os.Expand(pInfo.ExecStart, getEnvVal), " ") + cmdStr := cmdParam[0] + cmdParams := cmdParam[1:] + cmd := exec.Command(cmdStr, cmdParams...) + cmd.Env = envList + iostdout, err := cmd.StdoutPipe() + if err != nil { + pInfo.Status = status.Fail + logger.Error.Printf("Failed to execute plugin %s. 
Error: %s\n", pInfo.Name, err.Error()) + pInfo.StdOutErr = []string{err.Error()} + logger.ConsoleInfo.Printf("%s: %s\n", pInfo.Description, pInfo.Status) + statusCh <- map[string]*Plugin{p: &pInfo} + return + } + cmd.Stderr = cmd.Stdout + + chLog.Println("Executing command:", pInfo.ExecStart) + err = cmd.Start() + var stdOutErr []string + if err == nil { + scanner := bufio.NewScanner(iostdout) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + iobytes := scanner.Text() + chLog.Println(string(iobytes)) + stdOutErr = append(stdOutErr, iobytes) + } + err = cmd.Wait() + // chLog.Printf("command exited with code: %+v", err) + } + + func() { + chLog.Printf("INFO: Plugin(%s): Executing command: %s", p, pInfo.ExecStart) + if err != nil { + chLog.Printf("ERROR: Plugin(%s): Failed to execute command, err=%s", p, err.Error()) + graph.UpdateGraph(getPluginType(p), p, status.Fail, pluginLogFile) + } else { + chLog.Printf("INFO: Plugin(%s): Stdout & Stderr: %v", p, stdOutErr) + graph.UpdateGraph(getPluginType(p), p, status.Ok, pluginLogFile) + } + }() + + logger.Debug.Println("Stdout & Stderr:", stdOutErr) + pStatus := Plugin{StdOutErr: stdOutErr} + if err != nil { + pStatus.Status = status.Fail + logger.Error.Printf("Failed to execute plugin %s. 
err=%s\n", p, err.Error()) + logger.ConsoleError.Printf("%s: %s\n", pInfo.Description, status.Fail) + statusCh <- map[string]*Plugin{p: &pStatus} + return + } + pStatus.Status = status.Ok + logger.ConsoleInfo.Printf("%s: %s\n", pInfo.Description, status.Ok) + statusCh <- map[string]*Plugin{p: &pStatus} +} + +func executePlugins(psStatus *Plugins, sequential bool, env map[string]string) bool { + logger.Debug.Printf("Entering executePlugins(%+v, %v, %+v)...", + psStatus, sequential, env) + defer logger.Debug.Println("Exiting executePlugins") + + retStatus := true + + nPInfo := normalizePluginsInfo(*psStatus) + + _, err := validateDependencies(nPInfo) + if err != nil { + return false + } + + waitCount := map[string]int{} + for pIdx, pInfo := range nPInfo { + p := pInfo.Name + waitCount[p] = len(nPInfo[pIdx].Requires) + logger.Debug.Printf("%s plugin dependencies: %+v", p, nPInfo[pIdx]) + } + + pluginIndexes := make(map[string]int) + for pIdx, pInfo := range *psStatus { + pluginIndexes[pInfo.Name] = pIdx + } + executingCnt := 0 + exeCh := make(chan map[string]*Plugin) + failedDependency := make(map[string]bool) + for len(pluginIndexes) > 0 || executingCnt != 0 { + for _, pInfo := range nPInfo { + p := pInfo.Name + // INFO: When all dependencies are met, plugin waitCount would be 0. + // When sequential execution is enforced, even if a plugin is ready + // to run, make sure that only one plugin is running at time, by + // checking executing count is 0. + // When sequential execution is not enforced, run plugins that are ready. + if waitCount[p] == 0 && ((sequential == false) || + (sequential == true && executingCnt == 0)) { + logger.Info.Printf("Plugin %s is ready for execution: %v.", p, pInfo) + waitCount[p]-- + pIdx := pluginIndexes[p] + ps := *psStatus + ps[pIdx].RunTime.StartTime = time.Now() + + go executePluginCmd(exeCh, *pInfo, failedDependency[p], env) + executingCnt++ + } + } + // start other dependent ones as soon as one of the plugin completes. 
+ exeStatus := <-exeCh + executingCnt-- + for plugin, pStatus := range exeStatus { + logger.Info.Printf("%s status: %v", plugin, pStatus.Status) + pIdx := pluginIndexes[plugin] + ps := *psStatus + ps[pIdx].RunTime.EndTime = time.Now() + ps[pIdx].RunTime.Duration = ps[pIdx].RunTime.EndTime.Sub(ps[pIdx].RunTime.StartTime) + ps[pIdx].Status = pStatus.Status + ps[pIdx].StdOutErr = pStatus.StdOutErr + if pStatus.Status == status.Fail { + retStatus = false + } + + for _, rby := range nPInfo[pIdx].RequiredBy { + if pStatus.Status == status.Fail || + pStatus.Status == status.Skip { + // TODO: When "Wants" and "WantedBy" options are supported similar to + // "Requires" and "RequiredBy", the failedDependency flag should be + // checked in conjunction with if its required dependency is failed, + // and not the wanted dependency. + failedDependency[rby] = true + } + waitCount[rby]-- + } + delete(pluginIndexes, plugin) + } + } + return retStatus +} + +// CmdOptions contains subcommands and parameters of the pm command. +var CmdOptions struct { + RunCmd *flag.FlagSet + ListCmd *flag.FlagSet + versionCmd *flag.FlagSet + versionPtr *bool + + // sequential enforces execution of plugins in sequence mode. + // (If sequential is disabled, plugins whose dependencies are met would be executed in parallel). + sequential *bool + + // pluginsPtr specifies plugins Name and its Description, ExecStart and any dependencies (Requires, RequiredBy). + // For input format, check 'Plugins' struct. + pluginsPtr *string + + // pluginTypePtr indicates type of the plugin to run. + pluginTypePtr *string + + // libraryPtr indicates the path of the plugins library. + libraryPtr *string + + // pluginDirPtr indicates the location of the plugins. + // NOTE: `pluginDir` is deprecated, use `library` instead. + pluginDirPtr *string +} + +// ListOptions are optional parameters related to list function. +type ListOptions struct { + Type string +} + +// RunOptions are optional parameters related to run function. 
+type RunOptions struct { + Library string + Type string + Sequential bool +} + +// ListFromLibrary lists the plugin and its dependencies from the plugins +// library path. +func ListFromLibrary(pluginType, library string) error { + pluginsInfo, err := getPluginsInfoFromLibrary(pluginType, library) + if err != nil { + return err + } + + listOptions := ListOptions{ + Type: pluginType, + } + return list(pluginsInfo, listOptions) +} + +// ListFromJSONStrOrFile lists the plugin and its dependencies from a json +// string or a json file. +func ListFromJSONStrOrFile(jsonStrOrFile string, listOptions ListOptions) error { + pluginsInfo, err := getPluginsInfoFromJSONStrOrFile(jsonStrOrFile) + if err != nil { + return err + } + + return list(pluginsInfo.Plugins, listOptions) +} + +// List the plugin and its dependencies. +func list(pluginsInfo Plugins, listOptions ListOptions) error { + pluginType := listOptions.Type + + var err error + + err = graph.InitGraph(pluginType, pluginsInfo) + if err != nil { + return err + } + + logger.ConsoleInfo.Printf("The list of plugins are mapped in %s", graph.GetImagePath()) + return nil +} + +func readFile(filePath string) (string, error) { + bFileContents, err := ioutil.ReadFile(filePath) + if err != nil { + message := "Failed to read " + filePath + " file." 
+ err = errors.New(message) + return "", err + } + + return string(bFileContents), nil +} + +// RegisterCommandOptions registers the command options that are supported +func RegisterCommandOptions(progname string) { + logger.Debug.Println("Entering RegisterCommandOptions") + defer logger.Debug.Println("Exiting RegisterCommandOptions") + + CmdOptions.versionCmd = flag.NewFlagSet(progname+" version", flag.ContinueOnError) + CmdOptions.versionPtr = CmdOptions.versionCmd.Bool("version", false, "print Plugin Manager (PM) version.") + + CmdOptions.RunCmd = flag.NewFlagSet(progname+" run", flag.PanicOnError) + CmdOptions.pluginsPtr = CmdOptions.RunCmd.String( + "plugins", + "", + "Plugins and its dependencies in json format as a string or in a file (Ex: './plugins.json').\nWhen specified, plugin files are not looked up in specified -library path.", + ) + CmdOptions.pluginTypePtr = CmdOptions.RunCmd.String( + "type", + "", + "Type of plugin.", + ) + CmdOptions.libraryPtr = CmdOptions.RunCmd.String( + "library", + "", + "Path of the plugins library.\nSets PM_LIBRARY env value.\n"+ + "When '-plugins' is specified, only PM_LIBRARY env value is set. 
"+ + "The plugin files are not read from library path.", + ) + CmdOptions.sequential = CmdOptions.RunCmd.Bool( + "sequential", + false, + "Enforce running plugins in sequential.", + ) + logger.RegisterCommandOptions(CmdOptions.RunCmd, map[string]string{ + "log-dir": config.GetLogDir(), + "log-file": config.GetLogFile(), + "log-level": config.GetLogLevel(), + }) + output.RegisterCommandOptions(CmdOptions.RunCmd, map[string]string{}) + + CmdOptions.ListCmd = flag.NewFlagSet(progname+" list", flag.PanicOnError) + CmdOptions.ListCmd.StringVar( + CmdOptions.pluginsPtr, + "plugins", + "", + "Plugins and its dependencies in json format as a string or in a file (Ex: './plugins.json')", + ) + CmdOptions.ListCmd.StringVar( + CmdOptions.pluginTypePtr, + "type", + "", + "Type of plugin.", + ) + CmdOptions.ListCmd.StringVar( + CmdOptions.libraryPtr, + "library", + "", + "Path of the plugins library.", + ) + logger.RegisterCommandOptions(CmdOptions.ListCmd, map[string]string{ + "log-dir": config.GetLogDir(), + "log-file": config.GetLogFile(), + "log-level": config.GetLogLevel(), + }) +} + +// RunFromJSONStrOrFile runs the plugins based on dependencies specified in a +// json string or a json/yaml file. +func RunFromJSONStrOrFile(result *Plugin, jsonStrOrFile string, runOptions RunOptions) error { + pluginsInfo, err := getPluginsInfoFromJSONStrOrFile(jsonStrOrFile) + if err != nil { + result.Status = status.Fail + result.StdOutErr = append(result.StdOutErr, err.Error()) + return err + } + result.Name = pluginsInfo.Name + result.Library = pluginsInfo.Library + result.Plugins = pluginsInfo.Plugins + // INFO: Override values of json/file with explicitly passed cmdline parameter. Else, set runOptions type from json/file. 
+ if runOptions.Type != "" { + result.Name = runOptions.Type + } else { + runOptions.Type = pluginsInfo.Name + } + if runOptions.Library != "" { + result.Library = runOptions.Library + } else { + runOptions.Library = pluginsInfo.Library + } + return run(result, runOptions) +} + +// RunFromLibrary runs the specified plugin type plugins from the library. +func RunFromLibrary(result *Plugin, pluginType string, runOptions RunOptions) error { + result.Name = pluginType + + var pluginsInfo, err = getPluginsInfoFromLibrary(pluginType, runOptions.Library) + if err != nil { + result.Status = status.Fail + result.StdOutErr = append(result.StdOutErr, err.Error()) + return err + } + result.Plugins = pluginsInfo + + runOptions.Type = pluginType + return run(result, runOptions) +} + +// run the specified plugins. +func run(result *Plugin, runOptions RunOptions) error { + logger.Debug.Printf("Entering run(%+v, %+v)...", result, runOptions) + defer logger.Debug.Println("Exiting run") + pluginType := runOptions.Type + sequential := runOptions.Sequential + + result.RunTime.StartTime = time.Now() + defer func() { + result.RunTime.EndTime = time.Now() + result.RunTime.Duration = result.RunTime.EndTime.Sub(result.RunTime.StartTime) + }() + + if err := osutils.OsMkdirAll(config.GetPluginsLogDir(), 0755); nil != err { + err = logger.ConsoleError.PrintNReturnError( + "Failed to create the plugins logs directory: %s. 
"+ + "Error: %s", config.GetPluginsLogDir(), err.Error()) + result.Status = status.Fail + result.StdOutErr = append(result.StdOutErr, err.Error()) + return err + } + + graph.InitGraph(pluginType, result.Plugins) + + env := map[string]string{} + if runOptions.Library != "" { + env["PM_LIBRARY"] = runOptions.Library + } + execStatus := executePlugins(&result.Plugins, sequential, env) + if execStatus != true { + result.Status = status.Fail + err := fmt.Errorf("Running %s plugins: %s", pluginType, status.Fail) + result.StdOutErr = append(result.StdOutErr, err.Error()) + logger.ConsoleError.Printf("%s\n", err.Error()) + return err + } + result.Status = status.Ok + logger.ConsoleInfo.Printf("Running %s plugins: %s\n", pluginType, status.Ok) + return nil +} + +// ScanCommandOptions scans for the command line options and makes appropriate +// function call. +// Input: +// 1. map[string]interface{} +// where, the options could be following: +// "progname": Name of the program along with any cmds (ex: asum pm) +// "cmd-index": Index to the cmd (ex: run) +func ScanCommandOptions(options map[string]interface{}) error { + logger.Debug.Printf("Entering ScanCommandOptions(%+v)...", options) + defer logger.Debug.Println("Exiting ScanCommandOptions") + + progname := filepath.Base(os.Args[0]) + cmdIndex := 1 + if valI, ok := options["progname"]; ok { + progname = valI.(string) + } + if valI, ok := options["cmd-index"]; ok { + cmdIndex = valI.(int) + } + cmd := os.Args[cmdIndex] + logger.Debug.Println("progname:", progname, "cmd with arguments: ", os.Args[cmdIndex:]) + + switch cmd { + case "version": + logger.ConsoleInfo.Printf("Plugin Manager (PM) version %s", version) + + case "list": + err := CmdOptions.ListCmd.Parse(os.Args[cmdIndex+1:]) + if err != nil { + logger.Error.Printf("Command arguments parse error, cmd=%s, err=%s", cmd, err.Error()) + } + + case "run": + err := CmdOptions.RunCmd.Parse(os.Args[cmdIndex+1:]) + if err != nil { + logger.Error.Printf("Command arguments 
parse error, cmd=%s, err=%s", cmd, err.Error()) + } + + case "help": + subcmd := "" + if len(os.Args) == cmdIndex+2 { + subcmd = os.Args[cmdIndex+1] + } else if len(os.Args) > cmdIndex+2 { + fmt.Fprintf(os.Stderr, "usage: %s help command\n\nToo many arguments (%d) given.\n", progname, len(os.Args)) + os.Exit(2) + } + usage(progname, subcmd) + + default: + fmt.Fprintf(os.Stderr, "%s: unknown command \"%s\"\n", progname, os.Args[1]) + fmt.Fprintf(os.Stderr, "Run '%s help [command]' for usage.\n", progname) + os.Exit(2) + } + + // Override `pm.config.yaml` value with command-line arguments. + if *CmdOptions.libraryPtr != "" { + config.SetPluginsLibrary(*CmdOptions.libraryPtr) + } + myLogFile := logger.DefaultLogDir + if logger.GetLogDir() != "" { + config.SetPMLogDir(logger.GetLogDir()) + myLogFile = config.GetPMLogDir() + } + // Info: Call set PM log-dir to clean extra slashes, and to append path + // separator at the end. + config.SetPMLogDir(config.GetPMLogDir()) + + // Reinit logging if required. + if logger.GetLogTag() != "" { + // Use Syslog whenever logTag is specified. + err := logger.InitSysLogger(logger.GetLogTag(), logger.GetLogLevel()) + if err != nil { + fmt.Printf("Failed to initialize SysLog [%v]. Exiting...\n", err) + os.Exit(-1) + } + } else { + tLogFile := logger.DefaultLogFile + if logger.GetLogFile() != "" { + tLogFile = logger.GetLogFile() + } + // NOTE: Even when no log file is specified, and we're using default log + // file name, we still need to call SetPMLogFile() as SVG image file name + // is based on this. Otherwise image and dot files will not have any names + // but only extensions (i.e., they get created as hidden files). 
+ config.SetPMLogFile(tLogFile) + myLogFile += config.GetPMLogFile() + if myLogFile != logger.DefaultLogPath { + myLogFile := filepath.Clean(myLogFile) + logger.Info.Println("Logging to specified log file:", myLogFile) + errList := logger.DeInitLogger() + if len(errList) > 0 { + fmt.Printf("Failed to deinitialize logger, err=[%v]", errList) + os.Exit(-1) + } + err := logger.InitFileLogger(myLogFile, logger.GetLogLevel()) + if err != nil { + fmt.Printf("Failed to initialize logger, err=[%v]", err) + os.Exit(-1) + } + } + } + + var err error + pluginType := *CmdOptions.pluginTypePtr + if *CmdOptions.pluginsPtr != "" { + jsonStrOrFile := *CmdOptions.pluginsPtr + switch cmd { + case "list": + err = ListFromJSONStrOrFile(jsonStrOrFile, + ListOptions{Type: pluginType}) + + case "run": + pmstatus := Plugin{} + runOptions := RunOptions{ + Type: pluginType, + Sequential: *CmdOptions.sequential, + } + // NOTE: When '-plugins' info is passed as str or file, don't use + // Library from config. + // The config file is expected to have some library path, and that + // may not be applicable for this set of inputs. So, set/use + // "Library" value only if it's passed as cmdline argument. + if *CmdOptions.libraryPtr != "" { + runOptions.Library = config.GetPluginsLibrary() + } + err = RunFromJSONStrOrFile(&pmstatus, jsonStrOrFile, runOptions) + output.Write(pmstatus) + } + } else if pluginType != "" { + switch cmd { + case "list": + err = ListFromLibrary(pluginType, config.GetPluginsLibrary()) + + case "run": + pmstatus := Plugin{} + err = RunFromLibrary(&pmstatus, pluginType, + RunOptions{Library: config.GetPluginsLibrary(), + Sequential: *CmdOptions.sequential}) + output.Write(pmstatus) + } + } + return err +} + +// Usage of Plugin Manager (pm) command. +func usage(progname, subcmd string) { + switch subcmd { + case "", "pm": + var usageStr = ` +Plugin Manager ( PROGNAME ` + subcmd + `) is a tool for managing ASUM plugins. 
+ +Usage: + + PROGNAME ` + subcmd + ` command [arguments] + +The commands are: + + list lists plugins and its dependencies of specified type in an image. + run run plugins of specified type. + version print Plugin Manager version. + +Use "PROGNAME ` + subcmd + ` help [command]" for more information about a command. + +` + fmt.Fprintf(os.Stderr, strings.Replace(usageStr, "PROGNAME", progname, -1)) + case "version": + CmdOptions.versionCmd.Usage() + case "list": + CmdOptions.ListCmd.Usage() + case "run": + CmdOptions.RunCmd.Usage() + default: + fmt.Fprintf(os.Stderr, "Unknown help topic `%s`. Run '%s'.", subcmd, progname+" help") + fmt.Println() + os.Exit(2) + } +} diff --git a/vendor/github.com/VeritasOS/plugin-manager/types/runtime/runtime.go b/vendor/github.com/VeritasOS/plugin-manager/types/runtime/runtime.go new file mode 100644 index 0000000..5dfb774 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/types/runtime/runtime.go @@ -0,0 +1,12 @@ +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +package runtime + +import "time" + +// RunTime to hold start time, end time and duration of a plugin and/or plugin manager execution. +type RunTime struct { + StartTime time.Time + EndTime time.Time + Duration time.Duration +} diff --git a/vendor/github.com/VeritasOS/plugin-manager/types/status/status.go b/vendor/github.com/VeritasOS/plugin-manager/types/status/status.go new file mode 100644 index 0000000..dabd34a --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/types/status/status.go @@ -0,0 +1,15 @@ +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +package status + +// Status of plugin execution used for displaying to user on console. +const ( + // Fail + Fail = "Failed" + // Ok means success state. + Ok = "Succeeded" + // Skip + Skip = "Skipped" + // Start indicate process started / is running. 
+ Start = "Starting" +) diff --git a/vendor/github.com/VeritasOS/plugin-manager/types/types.go b/vendor/github.com/VeritasOS/plugin-manager/types/types.go new file mode 100644 index 0000000..1270608 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/types/types.go @@ -0,0 +1,24 @@ +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +// Package types defines new plugin manager types. +package types + +import "github.com/VeritasOS/plugin-manager/types/runtime" + +// Plugin is plugin's info: name, description, cmd to run, status, stdouterr. +type Plugin struct { + Name string + Description string + RequiredBy []string + Requires []string + ExecStart string + Plugins Plugins + Library string + // TODO: Add Percentage to get no. of pending vs. completed run of plugins. + RunTime runtime.RunTime + Status string + StdOutErr []string +} + +// Plugins is a list of plugins' info. +type Plugins []*Plugin diff --git a/vendor/github.com/VeritasOS/plugin-manager/utils/log/log.go b/vendor/github.com/VeritasOS/plugin-manager/utils/log/log.go new file mode 100644 index 0000000..cc89b91 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/utils/log/log.go @@ -0,0 +1,567 @@ +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +package logger + +import ( + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "log/syslog" + "os" + "path/filepath" + "strings" + "sync" + "time" +) + +// options contains command line or config file parameters/options/fields related to logging. +var options struct { + // logDir indicates the location for writing log file. + logDir string + + // logFile indicates the log file name to write to in the logDir location. + logFile string + + // logLevel indicates the type (ERROR, WARNING, INFO, DEBUG) of log messages to be logged. 
Setting a particular log-level say "INFO", logs all messages of that type (i.e., "INFO") as well as the previous message types (i.e., ERROR and WARNING).
+	logLevel string
+
+	// syslogTag indicates the log tag to write into syslog.
+	syslogTag string
+}
+
+// Type is used to track logger type
+type Type int
+
+// Below logger types are supported.
+const (
+	FileLog = 1 << iota
+	SysLog
+)
+
+var progname = filepath.Base(os.Args[0])
+
+// DefaultLogDir used before reading conf file or cmdline params, and could be overridden by conf file or cmdline params.
+var DefaultLogDir = "/var/log/asum/"
+
+// DefaultLogFile used before reading conf file or cmdline params, and could be overridden by conf file or cmdline params.
+var DefaultLogFile = progname + ".log"
+
+// DefaultLogPath used in case if path to log file is not specified in config or cmdline.
+var DefaultLogPath = DefaultLogDir + DefaultLogFile
+
+// DefaultLogLevel used in case if it's not specified in config or cmdline.
+var DefaultLogLevel = "INFO"
+
+const (
+	syslogConfig = "/etc/rsyslog.d/10-vxos-asum.conf"
+	syslogFacility = syslog.LOG_LOCAL0
+)
+
+// SyslogTagPrefix defines tag name for syslog.
+const SyslogTagPrefix = "vxos-asum@"
+
+// Config is used to track user defined configuration.
+type Config struct {
+	loggerType Type
+	level string
+	module string
+	logfilePath string
+}
+
+// FileLogConfig Setup FileLog Config
+func FileLogConfig(level, file, module string) Config {
+	return Config{loggerType: FileLog, level: level, module: module, logfilePath: file}
+}
+
+// SyslogConfig Setup SysLog Config
+func SyslogConfig(level, module string) Config {
+	return Config{loggerType: SysLog, level: level, module: module, logfilePath: ""}
+}
+
+// ConsoleLogger will print message on screen and write to log file
+// The message will always be printed regardless of the log level
+// The writing action depends on the log level
+type ConsoleLogger struct {
+	prefix string
+	logger *log.Logger
+}
+
+// Logger implements functions for all log levels
+type Logger struct {
+	//Logger for log Debug messages.
+	debug *log.Logger
+	//Logger for log Info messages.
+	info *log.Logger
+	//Logger for log Warning messages.
+	warning *log.Logger
+	//Logger for log Error messages.
+	error *log.Logger
+}
+
+// Debug returns the debug logger
+func (logger *Logger) Debug() *log.Logger {
+	return logger.debug
+}
+
+// Info returns the info logger
+func (logger *Logger) Info() *log.Logger {
+	return logger.info
+}
+
+// Warning returns the warning logger
+func (logger *Logger) Warning() *log.Logger {
+	return logger.warning
+}
+
+// Error returns the error logger
+func (logger *Logger) Error() *log.Logger {
+	return logger.error
+}
+
+// Get returns the object of logger type
+func Get() *Logger {
+	return &Logger{debug: Debug, info: Info, warning: Warning, error: Error}
+}
+
+// Printf prints message in console and writes to log file
+func (consoleLog *ConsoleLogger) Printf(msg string, args ...interface{}) {
+	if !strings.HasSuffix(msg, "\n") {
+		msg += "\n"
+	}
+	fmt.Printf(msg, args...)
+	str := consoleLog.prefix + msg
+	consoleLog.logger.Printf(str, args...)
+} + +// PrintNReturnError calls Printf and returns wrapped message as error +func (consoleLog *ConsoleLogger) PrintNReturnError(msg string, args ...interface{}) error { + err := fmt.Errorf(msg, args...) + consoleLog.Printf(msg, args...) + return err +} + +// LogHandle the log handle interface +type LogHandle interface { + Write(p []byte) (n int, err error) + Close() error +} + +// CloseLogHandle closes log handler +func CloseLogHandle(handle LogHandle) error { + if handle != nil { + err := handle.Close() + if err == nil { + handle = nil + } + return err + } + return nil +} + +// FileLogHandle the file log handler +type FileLogHandle struct { + logFile *os.File + hostname *string +} + +func (handle *FileLogHandle) Write(p []byte) (n int, err error) { + t := time.Now() + // required timestamp format is based on both rfc3339 and ISO 8601 + // YYYY-MM-DDThh:mm:ss.mmm+04:00 + prefix := t.Format(strings.Replace(time.RFC3339, "Z", ".000-", 1)) + if handle.hostname != nil { + prefix = prefix + " " + *handle.hostname + " " + } + + buf := []byte(prefix) + buf = append(buf, p...) + + n, err = handle.logFile.Write(buf) + if err != nil || n != len(buf) { + return n, err + } + // io.MultiWriter will verify write count, return original bytes without prefixes + return len(p), nil +} + +// Close close the FileLog handler. +func (handle *FileLogHandle) Close() error { + if handle.logFile != nil { + return handle.logFile.Close() + } + return nil +} + +var ( + //ConsoleDebug logger for console and log Debug messages. + ConsoleDebug ConsoleLogger + //ConsoleInfo logger for console and log Info messages. + ConsoleInfo ConsoleLogger + //ConsoleWarning logger for console and log Warning messages. + ConsoleWarning ConsoleLogger + //ConsoleError logger for console and log Error messages. + ConsoleError ConsoleLogger + //Debug logger for logging Debug messages. + Debug *log.Logger + //Info logger for logging Info messages. + Info *log.Logger + //Warning logger for logging Warning messages. 
+ Warning *log.Logger + //Error logger for logging Error messages. + Error *log.Logger + fileLogHandle LogHandle + syslogDebugHandle LogHandle + syslogInfoHandle LogHandle + syslogWarnHandle LogHandle + syslogErrorHandle LogHandle + + fOpenFile = os.OpenFile + fCloseLogHandle = CloseLogHandle + fHostname = os.Hostname + fMkdirAll = os.MkdirAll + fNewSyslogger = func(priority syslog.Priority, tag string) (LogHandle, error) { + return syslog.New(priority, tag) + } + + // Lock to ensure thread safe behaviour when initializing and de-initializing the singleton logger object + singleLogger = sync.Mutex{} +) + +// IsFileLogger returns true if file log is initialized (and not syslog) +func IsFileLogger() bool { + if fileLogHandle != nil { + return true + } + return false +} + +// initLogger sets logger for all supported log levels +func initLogger( + traceHandle io.Writer, + infoHandle io.Writer, + warningHandle io.Writer, + errorHandle io.Writer) { + + Debug = log.New(traceHandle, + "DEBUG: ", + 0) + + Info = log.New(infoHandle, + "INFO: ", + 0) + + Warning = log.New(warningHandle, + "WARNING: ", + 0) + + Error = log.New(errorHandle, + "ERROR: ", + 0) + + ConsoleDebug = ConsoleLogger{"[DEBUG] ", Debug} + ConsoleInfo = ConsoleLogger{"[INFO] ", Info} + ConsoleWarning = ConsoleLogger{"[WARNING] ", Warning} + ConsoleError = ConsoleLogger{"[ERROR] ", Error} +} + +func initFileLogHandle(myLogFile string) error { + if fileLogHandle != nil { + return nil + } + + var err error + myLogFile = filepath.Clean(myLogFile) + myLogFileNoExt := myLogFile + if strings.HasSuffix(myLogFile, ".log") { + myLogFileNoExt = strings.TrimSuffix(myLogFile, filepath.Ext(myLogFile)) + } + ts := time.Now().Format(time.RFC3339Nano) + logFile := myLogFileNoExt + "." 
+ ts + ".log" + + _, err = os.Stat(logFile) + if os.IsNotExist(err) { + logFileDir := filepath.Dir(logFile) + err = fMkdirAll(logFileDir, 0755) + if err != nil { + return fmt.Errorf("os.MkdirAll(%s) failed", logFileDir) + } + } + + file, err := fOpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + return err + } + hostname, err := fHostname() + if err != nil { + hostname = "unknown" + } + fh := FileLogHandle{} + fh.logFile = file + fh.hostname = &hostname + fileLogHandle = &fh + + fmt.Println("Log:", logFile) + return nil +} + +func initSyslogHandle(syslogTag string) (err error) { + if syslogDebugHandle == nil { + syslogDebugHandle, err = fNewSyslogger(syslogFacility|syslog.LOG_DEBUG, syslogTag) + if err != nil { + syslogDebugHandle = nil + return errors.New("creating syslog debug handle failed") + } + } + + if syslogInfoHandle == nil { + syslogInfoHandle, err = fNewSyslogger(syslogFacility|syslog.LOG_INFO, syslogTag) + if err != nil { + syslogInfoHandle = nil + return errors.New("creating syslog info handle failed") + } + } + + if syslogWarnHandle == nil { + syslogWarnHandle, err = fNewSyslogger(syslogFacility|syslog.LOG_WARNING, syslogTag) + if err != nil { + syslogWarnHandle = nil + return errors.New("creating syslog warning handle failed") + } + } + if syslogErrorHandle == nil { + syslogErrorHandle, err = fNewSyslogger(syslogFacility|syslog.LOG_ERR, syslogTag) + if err != nil { + syslogErrorHandle = nil + return errors.New("creating syslog error handle failed") + } + } + return nil +} + +// InitLogging initializes logging to use syslog when its config exists, or to use file logging automatically. This setting might be overridden after reading config or cmdline parameters and by calling InitializeLogger(). +func InitLogging() { + // Use syslog until the config file is read. + // If syslog initialization fails, file logging will be used. 
+ useFileLog := true + if IsSysLogConfigPresent() { + useFileLog = false + module := progname + "-" + os.Args[1] + err := InitSysLogger(module, DefaultLogLevel) + if err != nil { + fmt.Printf("Failed to initialize SysLog for logging [%#v]. Proceeding with FileLog...\n", err) + useFileLog = true + } + } + if useFileLog { + // NOTE: while running tests, the path of binary would be in `/tmp/`, + // so, using relative logging path w.r.t. binary wouldn't be accessible on Jenkins. + // So, use absolute path which also has write permissions (like current source directory). + + // Update default values with specified env values. + logDir := os.Getenv("PM_LOG_DIR") + if logDir != "" { + DefaultLogDir = filepath.Clean(logDir) + } + logFile := os.Getenv("PM_LOG_FILE") + if logFile != "" { + DefaultLogFile = filepath.Clean(logFile) + } + // INFO: Update DefaultLogPath in case if values are passed via env, + // so that comparison at a later path succeeds. + DefaultLogPath = filepath.Clean(DefaultLogDir + string(os.PathSeparator) + DefaultLogFile) + + logLevel := os.Getenv("PM_LOG_LEVEL") + if logLevel != "" { + DefaultLogLevel = logLevel + } + + err := InitFileLogger(DefaultLogPath, DefaultLogLevel) + if err != nil { + fmt.Printf("Failed to initialize file logger [%#v].\n", err) + os.Exit(1) + } + } +} + +// InitializeLogger initializes customized file logger or syslog logger with given log config +func InitializeLogger(config Config) error { + singleLogger.Lock() + defer singleLogger.Unlock() + var debugWriter, infoWriter, warnWriter, errWriter io.Writer + if config.loggerType == FileLog { + if err := initFileLogHandle(config.logfilePath); err != nil { + return err + } + debugWriter = fileLogHandle + infoWriter = fileLogHandle + warnWriter = fileLogHandle + errWriter = fileLogHandle + } else { + syslogTag := config.module + if strings.Index(syslogTag, SyslogTagPrefix) != 0 { + // all flex appliance syslog tag starts with same prefix, in case we could filter the logs + 
syslogTag = SyslogTagPrefix + syslogTag + } + if err := initSyslogHandle(syslogTag); err != nil { + return err + } + debugWriter = syslogDebugHandle + infoWriter = syslogInfoHandle + warnWriter = syslogWarnHandle + errWriter = syslogErrorHandle + } + + switch config.level { + case "DEBUG": + // Enabled logging levels: debug, info, warning, error + initLogger(debugWriter, infoWriter, warnWriter, errWriter) + case "INFO": + // Enabled logging levels: info, warning, error + initLogger(ioutil.Discard, infoWriter, warnWriter, errWriter) + case "WARNING": + // Enabled logging levels: warning, error + initLogger(ioutil.Discard, ioutil.Discard, warnWriter, errWriter) + default: + // Enabled logging levels: error only + initLogger(ioutil.Discard, ioutil.Discard, ioutil.Discard, errWriter) + } + Info.Println("CMD:", os.Args[:]) + return nil +} + +// InitFileLogger initializes logger with given log level. +func InitFileLogger(logFile, logLevel string) error { + return InitializeLogger(FileLogConfig(logLevel, logFile, "")) +} + +// IsSysLogConfigPresent indicates whether syslog config is present. +func IsSysLogConfigPresent() bool { + _, err := os.Stat(syslogConfig) + if err != nil { + return false + } + return true +} + +// InitSysLogger initializes logger with given log level. 
+func InitSysLogger(module, logLevel string) error { + // Make sure the config file exist + if !IsSysLogConfigPresent() { + return fmt.Errorf("syslog config file %v is not present", syslogConfig) + } + return InitializeLogger(SyslogConfig(logLevel, module)) +} + +// DeInitLogger closes the log file and syslog handler +func DeInitLogger() []error { + singleLogger.Lock() + defer singleLogger.Unlock() + var errList []error + closeErr := fCloseLogHandle(fileLogHandle) + if closeErr != nil { + errList = append(errList, closeErr) + } + fileLogHandle = nil + + closeErr = fCloseLogHandle(syslogDebugHandle) + if closeErr != nil { + errList = append(errList, closeErr) + } + syslogDebugHandle = nil + + closeErr = fCloseLogHandle(syslogInfoHandle) + if closeErr != nil { + errList = append(errList, closeErr) + } + syslogInfoHandle = nil + + closeErr = fCloseLogHandle(syslogWarnHandle) + if closeErr != nil { + errList = append(errList, closeErr) + } + syslogWarnHandle = nil + + closeErr = fCloseLogHandle(syslogErrorHandle) + if closeErr != nil { + errList = append(errList, closeErr) + } + syslogErrorHandle = nil + + return errList +} + +// RegisterCommandOptions registers the command options related to the log options. 
+func RegisterCommandOptions(f *flag.FlagSet, defaultParams map[string]string) { + defaultLogDir, ok := defaultParams["log-dir"] + if !ok { + defaultLogDir = DefaultLogDir + } + defaultLogFile, ok := defaultParams["log-file"] + if !ok { + defaultLogFile = DefaultLogFile + } + defaultLogLevel, ok := defaultParams["log-level"] + if !ok { + defaultLogLevel = DefaultLogLevel + } + f.StringVar( + &options.logDir, + "log-dir", + defaultLogDir, + "Directory for the log file.", + ) + f.StringVar( + &options.logFile, + "log-file", + defaultLogFile, + "Name of the log file.", + ) + f.StringVar( + &options.logLevel, + "log-level", + defaultLogLevel, + "Log level ('ERROR', 'WARNING', 'INFO', 'DEBUG').", + ) + f.StringVar( + &options.syslogTag, + "log-tag", + "", + "Syslog tag name.", + ) +} + +// GetLogDir provides location for storing logs. +func GetLogDir() string { + // Clean path only if it's not empty, as otherwise it'll convert empty value to current directory. + if options.logDir != "" { + return filepath.FromSlash(filepath.Clean(options.logDir) + + string(os.PathSeparator)) + } + return "" +} + +// GetLogFile provides name of logfile. +func GetLogFile() string { + return options.logFile +} + +// GetLogLevel provides log level. +func GetLogLevel() string { + switch options.logLevel { + case "ERROR": + case "WARNING": + case "INFO": + case "DEBUG": + default: + options.logLevel = DefaultLogLevel + } + return options.logLevel +} + +// GetLogTag provides syslog tag name. +func GetLogTag() string { + return options.syslogTag +} diff --git a/vendor/github.com/VeritasOS/plugin-manager/utils/os/os.go b/vendor/github.com/VeritasOS/plugin-manager/utils/os/os.go new file mode 100644 index 0000000..fc7e92f --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/utils/os/os.go @@ -0,0 +1,70 @@ +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. 
IP63-2828-7171-04-15-9 + +// Package os contains some utility functions as well as pointers to os package +// functions so as to help in unit testing (mocking). +package os + +import ( + "os" + "os/exec" + "strings" + "sync" + + logger "github.com/VeritasOS/plugin-manager/utils/log" +) + +// ExecCommand a pointer to exec.Command +var ExecCommand = exec.Command + +// os package related functions. +var ( + // OsOpenFile is a pointer to os.OpenFile + OsOpenFile = os.OpenFile + + // OsRemove is a pointer to os.RemoveAll + OsRemoveAll = os.RemoveAll + + // OsMkdirAll is a pointer to os.MkdirAll + OsMkdirAll = os.MkdirAll + + // OsEnviron is a pointer to os.Environ + OsEnviron = os.Environ + + // Map maintaining os environment variables. + envMap = map[string]string{} + isEnvMapInit sync.Once +) + +// EnvMap returns OS environment variable values in a map. +// Any ENV variables set after this program is started (or func is called first time) would not be part of the map. +func EnvMap() map[string]string { + logger.Debug.Printf("In EnvMap()") + defer logger.Debug.Printf("Exiting EnvMap()") + // If already initialized once, return that map. + isEnvMapInit.Do(initEnvMap) + // Make copy as maps are passed/returned as reference. + // INFO: maps.Copy requires 1.21 golang version on builder. + // maps.Copy(em, envMap) + em := make(map[string]string, len(envMap)) + for k, v := range envMap { + em[k] = v + } + return em +} + +func initEnvMap() { + logger.Debug.Printf("In initEnvMap()") + defer logger.Debug.Printf("Exiting initEnvMap()") + + for _, keyval := range OsEnviron() { + // INFO: strings.Cut requires 1.18 golang version on builder. 
+ // key, val, found := strings.Cut(keyval, "=") + fields := strings.SplitN(keyval, "=", 2) + if len(fields) == 2 { + key := fields[0] + val := fields[1] + // logger.Debug.Printf("%v (key) = %v (value)", key, val) + envMap[key] = val + } + } +} diff --git a/vendor/github.com/VeritasOS/plugin-manager/utils/output/output.go b/vendor/github.com/VeritasOS/plugin-manager/utils/output/output.go new file mode 100644 index 0000000..1aa5c01 --- /dev/null +++ b/vendor/github.com/VeritasOS/plugin-manager/utils/output/output.go @@ -0,0 +1,112 @@ +// Copyright (c) 2024 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9 + +package output + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "path/filepath" + + logger "github.com/VeritasOS/plugin-manager/utils/log" + + "gopkg.in/yaml.v3" +) + +// cmdOptions contains commandline parameters/options for generating output in specified format. +var cmdOptions struct { + // File indicates the file name to write the results. + File string + + // Format indicates the output format to write the results. + // Supported formats are "json", "yaml". + Format string +} + +// GetFile returns the output file name that is currently set. +func GetFile() string { + return cmdOptions.File +} + +// GetFormat returns the output format type that is currently set. +func GetFormat() string { + return cmdOptions.Format +} + +// RegisterCommandOptions registers the command options related to the output options. 
+func RegisterCommandOptions(f *flag.FlagSet, defaultParams map[string]string) { + defaultOutputFile, ok := defaultParams["output-file"] + if !ok { + defaultOutputFile = "" + } + defaultOutputFormat, ok := defaultParams["output-format"] + if !ok { + defaultOutputFormat = "" + } + f.StringVar( + &cmdOptions.File, + "output-file", + defaultOutputFile, + "Name of the file to write the results.", + ) + f.StringVar( + &cmdOptions.Format, + "output-format", + defaultOutputFormat, + "The format of output to display the results.\n"+ + "Supported output formats are 'json', 'yaml'.", + ) +} + +// Write the given data in the format {json|yaml} that was set in options into +// a specified file. If file is not specified, then it will print on STDOUT. +func Write(data interface{}) error { + logger.Debug.Println("Entering Write") + defer logger.Debug.Println("Exiting Write") + + if cmdOptions.Format == "" { + // log.Printf("Skipping the Write() as output format is not set.") + return nil + } + logger.Info.Printf("Writing output in %s to file name: %s", + cmdOptions.Format, cmdOptions.File) + return writeToFile(data, cmdOptions.Format, cmdOptions.File) +} + +// writeToFile writes the given data in the specified format {json|yaml} into +// a specified file. If file is not specified, then it will print on STDOUT. 
+func writeToFile(data interface{}, format string, filePath string) error { + logger.Debug.Println("Entering writeToFile") + defer logger.Debug.Println("Exiting writeToFile") + + var err error + var out []byte + + if format == "json" { + out, err = json.MarshalIndent(data, "", " ") + } else { + out, err = yaml.Marshal(data) + } + if err != nil { + logger.Error.Printf("Unable to marshal %s data into %s, err=%v", data, format, err) + return err + } + + if filePath == "" { + // fmt.Println("File name to write the status is not specified.", + // "Outputting to console.") + fmt.Println(string(out)) + return nil + } + + filePath = filepath.FromSlash(filePath) + logger.Debug.Printf("Output file: %s", filePath) + err = ioutil.WriteFile(filePath, out, 0764) + if err != nil { + logger.Info.Printf("Unable to write to specified file %s. Error: %v", filePath, err) + return err + } + + return nil +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml deleted file mode 100644 index 7348c50..0000000 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "1.14.x" - - "tip" - -go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 0ee738e..0000000 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,390 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. 
The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. -type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. 
- e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. 
- case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - 
e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. 
- if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yaml_BINARY_TAG - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. 
- switch { - case strings.Contains(s, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod deleted file mode 100644 index 2cbb85a..0000000 --- a/vendor/gopkg.in/yaml.v2/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gopkg.in/yaml.v2 - -go 1.15 - -require gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 diff --git 
a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index a2dde60..0000000 --- a/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v3/LICENSE similarity index 51% rename from vendor/gopkg.in/yaml.v2/LICENSE.libyaml rename to vendor/gopkg.in/yaml.v3/LICENSE index 8da58fb..2683e4b 100644 --- a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -1,16 +1,17 @@ + +This project is covered by two different licenses: MIT and Apache. 
+ +#### MIT License #### + The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go -Copyright (c) 2006 Kirill Simonov +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in @@ -29,3 +30,21 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE similarity index 100% rename from vendor/gopkg.in/yaml.v2/NOTICE rename to vendor/gopkg.in/yaml.v3/NOTICE diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v3/README.md similarity index 66% rename from vendor/gopkg.in/yaml.v2/README.md rename to vendor/gopkg.in/yaml.v3/README.md index b50c6e8..08eb1ba 100644 --- a/vendor/gopkg.in/yaml.v2/README.md +++ b/vendor/gopkg.in/yaml.v3/README.md @@ -12,7 +12,23 @@ C library to parse and generate YAML data quickly and reliably. Compatibility ------------- -The yaml package supports most of YAML 1.1 and 1.2, including support for +The yaml package supports most of YAML 1.2, but preserves some behavior +from 1.1 for backwards compatibility. + +Specifically, as of v3 of the yaml package: + + - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being + decoded into a typed bool value. Otherwise they behave as a string. Booleans + in YAML 1.2 are _true/false_ only. + - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ + as specified in YAML 1.2, because most parsers still use the old format. + Octals in the _0o777_ format are supported though, so new files work. + - Does not support base-60 floats. These are gone from YAML 1.2, and were + actually never supported by this package as it's clearly a poor choice. + +and offers backwards +compatibility with YAML 1.1 in some cases. +1.2, including support for anchors, tags, map merging, etc. Multi-document unmarshalling is not yet implemented, and base-60 floats from YAML 1.1 are purposefully not supported since they're a poor design and are gone in YAML 1.2. @@ -20,29 +36,30 @@ supported since they're a poor design and are gone in YAML 1.2. Installation and usage ---------------------- -The import path for the package is *gopkg.in/yaml.v2*. +The import path for the package is *gopkg.in/yaml.v3*. 
To install it, run: - go get gopkg.in/yaml.v2 + go get gopkg.in/yaml.v3 API documentation ----------------- If opened in a browser, the import path itself leads to the API documentation: - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) API stability ------------- -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). +The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). License ------- -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. Example @@ -55,7 +72,7 @@ import ( "fmt" "log" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) var data = ` diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v3/apic.go similarity index 93% rename from vendor/gopkg.in/yaml.v2/apic.go rename to vendor/gopkg.in/yaml.v3/apic.go index acf7140..ae7d049 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -79,8 +101,6 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { parser.encoding = encoding } -var disableLineWrapping = false - // Create a new emitter object. func yaml_emitter_initialize(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{ @@ -88,9 +108,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), - } - if disableLineWrapping { - emitter.best_width = -1 + best_width: -1, } } @@ -143,7 +161,7 @@ func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { emitter.canonical = canonical } -//// Set the indentation increment. +// Set the indentation increment. func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { if indent < 2 || indent > 9 { indent = 2 @@ -293,29 +311,14 @@ func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { } } -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. 
-// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} // Create SCALAR. func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v3/decode.go similarity index 51% rename from vendor/gopkg.in/yaml.v2/decode.go rename to vendor/gopkg.in/yaml.v3/decode.go index 129bc2a..0173b69 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -1,3 +1,18 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package yaml import ( @@ -11,34 +26,16 @@ import ( "time" ) -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. 
- alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - // ---------------------------------------------------------------------------- // Parser, produces a node tree out of a libyaml event stream. type parser struct { parser yaml_parser_t event yaml_event_t - doc *node + doc *Node + anchors map[string]*Node doneInit bool + textless bool } func newParser(b []byte) *parser { @@ -66,6 +63,7 @@ func (p *parser) init() { if p.doneInit { return } + p.anchors = make(map[string]*Node) p.expect(yaml_STREAM_START_EVENT) p.doneInit = true } @@ -102,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -111,14 +112,18 @@ func (p *parser) peek() yaml_event_type_t { func (p *parser) fail() { var where string var line int - if p.parser.problem_mark.line != 0 { + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line // Scanner errors don't iterate line before returning error if p.parser.error == yaml_SCANNER_ERROR { line++ } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line } if line != 0 { where = "line " + strconv.Itoa(line) + ": " @@ -132,13 +137,14 @@ func (p *parser) fail() { failf("%s%s", where, msg) } -func (p *parser) anchor(n *node, anchor []byte) { +func (p *parser) anchor(n *Node, anchor []byte) { if anchor != nil { - p.doc.anchors[string(anchor)] = n + 
n.Anchor = string(anchor) + p.anchors[n.Anchor] = n } } -func (p *parser) parse() *node { +func (p *parser) parse() *Node { p.init() switch p.peek() { case yaml_SCALAR_EVENT: @@ -154,67 +160,148 @@ func (p *parser) parse() *node { case yaml_STREAM_END_EVENT: // Happens when attempting to decode an empty buffer. return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") default: - panic("attempted to parse unknown event: " + p.event.typ.String()) + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) } } -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child } -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") p.doc = n p.expect(yaml_DOCUMENT_START_EVENT) - n.children = append(n.children, p.parse()) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } p.expect(yaml_DOCUMENT_END_EVENT) return n } -func (p *parser) alias() *node { - n := p.node(aliasNode) - 
n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) } p.expect(yaml_ALIAS_EVENT) return n } -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle p.anchor(n, p.event.anchor) p.expect(yaml_SCALAR_EVENT) return n } -func (p *parser) sequence() *node { - n := p.node(sequenceNode) +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } p.anchor(n, p.event.anchor) p.expect(yaml_SEQUENCE_START_EVENT) for p.peek() != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) + p.parseChild(n) } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) p.expect(yaml_SEQUENCE_END_EVENT) return n } -func (p *parser) mapping() *node { - n := p.node(mappingNode) +func (p *parser) mapping() *Node 
{ + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } p.anchor(n, p.event.anchor) p.expect(yaml_MAPPING_START_EVENT) for p.peek() != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. + if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" } p.expect(yaml_MAPPING_END_EVENT) return n @@ -224,48 +311,70 @@ func (p *parser) mapping() *node { // Decoder, unmarshals a node into a provided value. 
type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type + doc *Node + aliases map[*Node]bool terrors []string - strict bool + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( - mapItemType = reflect.TypeOf(MapItem{}) + nodeType = reflect.TypeOf(Node{}) durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() timeType = reflect.TypeOf(time.Time{}) ptrTimeType = reflect.TypeOf(&time.Time{}) ) -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[*node]bool) +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) return d } -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + value := n.Value + if tag != seqTag && tag != mapTag { if len(value) > 10 { value = " `" + value[:7] + "...`" } else { value = " `" + value + "`" } } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) 
+ return false + } + if err != nil { + fail(err) + } + return true } -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { terrlen := len(d.terrors) err := u.UnmarshalYAML(func(v interface{}) (err error) { defer handleErr(&err) @@ -294,8 +403,8 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { // its types unmarshalled appropriately. // // If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { +func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { return out, false, false } again := true @@ -309,15 +418,40 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm again = true } if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { good = d.callUnmarshaler(n, u) return out, true, good } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } } } return out, false, false } +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + const ( // 400,000 decode operations is ~500kb of dense object declarations, or // ~5kb of dense object declarations with 10000% alias expansion @@ -347,7 +481,7 @@ func allowedAliasRatio(decodeCount int) 
float64 { } } -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { d.decodeCount++ if d.aliasDepth > 0 { d.aliasCount++ @@ -355,46 +489,55 @@ func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { failf("document contains excessive aliasing") } - switch n.kind { - case documentNode: + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: return d.document(n, out) - case aliasNode: + case AliasNode: return d.alias(n, out) } out, unmarshaled, good := d.prepare(n, out) if unmarshaled { return good } - switch n.kind { - case scalarNode: + switch n.Kind { + case ScalarNode: good = d.scalar(n, out) - case mappingNode: + case MappingNode: good = d.mapping(n, out) - case sequenceNode: + case SequenceNode: good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + failf("cannot decode node with unknown kind %d", n.Kind) } return good } -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { d.doc = n - d.unmarshal(n.children[0], out) + d.unmarshal(n.Content[0], out) return true } return false } -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { if d.aliases[n] { // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) + failf("anchor '%s' value contains itself", n.Value) } d.aliases[n] = true d.aliasDepth++ - good = d.unmarshal(n.alias, out) + good = d.unmarshal(n.Alias, out) d.aliasDepth-- delete(d.aliases, n) return good @@ -408,15 +551,26 @@ func resetMap(out reflect.Value) { } } -func (d *decoder) scalar(n *node, out reflect.Value) bool { +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { var tag string var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value + if n.indicatedString() { + tag = strTag + resolved = n.Value } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { data, err := base64.StdEncoding.DecodeString(resolved.(string)) if err != nil { failf("!!binary value contains invalid base64 data") @@ -425,12 +579,7 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { } } if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true + return d.null(out) } if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { // We've resolved to exactly the type we want, so use that. @@ -443,13 +592,13 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) if ok { var text []byte - if tag == yaml_BINARY_TAG { + if tag == binaryTag { text = []byte(resolved.(string)) } else { // We let any value be unmarshaled into TextUnmarshaler. // That might be more lax than we'd like, but the // TextUnmarshaler itself should bowl out any dubious values. 
- text = []byte(n.value) + text = []byte(n.Value) } err := u.UnmarshalText(text) if err != nil { @@ -460,47 +609,37 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { } switch out.Kind() { case reflect.String: - if tag == yaml_BINARY_TAG { + if tag == binaryTag { out.SetString(resolved.(string)) return true } - if resolved != nil { - out.SetString(n.value) - return true - } + out.SetString(n.Value) + return true case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yaml_TIMESTAMP_TAG { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. - out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } + out.Set(reflect.ValueOf(resolved)) return true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + switch resolved := resolved.(type) { case int: - if !out.OverflowInt(int64(resolved)) { + if !isDuration && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) return true } case int64: - if !out.OverflowInt(resolved) { + if !isDuration && !out.OverflowInt(resolved) { out.SetInt(resolved) return true } case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) return true } case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) return true } @@ -541,6 +680,17 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { case bool: out.SetBool(resolved) return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } } case reflect.Float32, reflect.Float64: switch resolved := resolved.(type) { @@ -563,13 +713,7 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { return true } case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 
- elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } + panic("yaml internal error: please report the issue") } d.terror(n, tag, out) return false @@ -582,8 +726,8 @@ func settableValueOf(i interface{}) reflect.Value { return sv } -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) var iface reflect.Value switch out.Kind() { @@ -598,7 +742,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { iface = out out = settableValueOf(make([]interface{}, l)) default: - d.terror(n, yaml_SEQ_TAG, out) + d.terror(n, seqTag, out) return false } et := out.Type().Elem() @@ -606,7 +750,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { j := 0 for i := 0; i < l; i++ { e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { + if ok := d.unmarshal(n.Content[i], e); ok { out.Index(j).Set(e) j++ } @@ -620,51 +764,79 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { return true } -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } switch out.Kind() { case reflect.Struct: return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) case reflect.Map: // okay case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) + iface 
:= out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true + out = reflect.MakeMap(d.generalMapType) } + iface.Set(out) default: - d.terror(n, yaml_MAP_TAG, out) + d.terror(n, mapTag, out) return false } + outt := out.Type() kt := outt.Key() et := outt.Elem() - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) + mapIsNew = true } - l := len(n.children) for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -673,87 +845,83 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { failf("invalid map key: %#v", k.Interface()) } e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) } } } - d.mapType = mapType - return true -} -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line 
%d: key %#v already set in map", n.line+1, k.Interface())) - return + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) } - out.SetMapIndex(k, v) + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true } -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { return false } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) + l := len(n.Content) for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false } } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType return true } -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { sinfo, err := getStructInfo(out.Type()) if err != nil { panic(err) } - name := settableValueOf("") - l := len(n.children) var inlineMap reflect.Value var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = inlineMap.Type().Elem() } + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool - if d.strict { + if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) } + name := settableValueOf("") + l := len(n.Content) for i := 0; i < l; i += 2 { - ni := 
n.children[i] + ni := n.Content[i] if isMerge(ni) { - d.merge(n.children[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) continue } doneFields[info.Id] = true @@ -762,20 +930,25 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { if info.Inline == nil { field = out.Field(info.Num) } else { - field = out.FieldByIndex(info.Inline) + field = d.fieldByIndex(n, out, info.Inline) } - d.unmarshal(n.children[i+1], field) + d.unmarshal(n.Content[i+1], field) } else if sinfo.InlineMap != -1 { if inlineMap.IsNil() { inlineMap.Set(reflect.MakeMap(inlineMap.Type())) } value := reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -783,24 +956,34 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - 
d.unmarshal(n, out) - case aliasNode: - if n.alias != nil && n.alias.kind != mappingNode { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - if ni.alias != nil && ni.alias.kind != mappingNode { + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() } - } else if ni.kind != mappingNode { + } else if ni.Kind != MappingNode { failWantMap() } d.unmarshal(ni, out) @@ -808,8 +991,10 @@ func (d *decoder) merge(n *node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) } diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go similarity index 80% rename from vendor/gopkg.in/yaml.v2/emitterc.go rename to vendor/gopkg.in/yaml.v3/emitterc.go index a1c2cc5..0f47c9c 100644 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -43,8 +65,13 @@ func put_break(emitter *yaml_emitter_t) bool { default: panic("unknown line break setting") } + if emitter.column == 0 { + emitter.space_above = true + } emitter.column = 0 emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). 
+ emitter.indention = true return true } @@ -97,8 +124,13 @@ func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { if !write(emitter, s, i) { return false } + if emitter.column == 0 { + emitter.space_above = true + } emitter.column = 0 emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true } return true } @@ -203,7 +235,14 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool emitter.indent = 0 } } else if !indentless { - emitter.indent += emitter.best_indent + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + } } return true } @@ -228,16 +267,22 @@ func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bo return yaml_emitter_emit_document_end(emitter, event) case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) case 
yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: return yaml_emitter_emit_flow_mapping_value(emitter, event, true) @@ -298,6 +343,8 @@ func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t emitter.column = 0 emitter.whitespace = true emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 if emitter.encoding != yaml_UTF8_ENCODING { if !yaml_emitter_write_bom(emitter) { @@ -392,13 +439,22 @@ func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { return false } - if emitter.canonical { + if emitter.canonical || true { if !yaml_emitter_write_indent(emitter) { return false } } } + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE return true } @@ -425,7 +481,20 @@ func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event // Expect the root node. func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true } // Expect DOCUMENT-END. 
@@ -433,6 +502,12 @@ func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t if event.typ != yaml_DOCUMENT_END_EVENT { return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 if !yaml_emitter_write_indent(emitter) { return false } @@ -454,7 +529,7 @@ func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t } // Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { if first { if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { return false @@ -466,13 +541,15 @@ func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_e } if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { + if emitter.canonical && !first && !trail { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { if !yaml_emitter_write_indent(emitter) { return false } @@ -480,29 +557,62 @@ func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_e if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { return false } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } emitter.state = 
emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } - if !first { + if !first && !trail { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { if !yaml_emitter_write_indent(emitter) { return false } } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true } // Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { if first { if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { return false @@ -514,13 +624,18 @@ func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_eve } if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } emitter.flow_level-- emitter.indent = emitter.indents[len(emitter.indents)-1] emitter.indents = emitter.indents[:len(emitter.indents)-1] if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } if !yaml_emitter_write_indent(emitter) { return false } @@ -528,16 +643,33 @@ func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_eve if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { return false } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } emitter.state = emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } - if !first { + if !first && !trail { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { if !yaml_emitter_write_indent(emitter) { return false @@ -571,14 +703,32 @@ func 
yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e return false } } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true } // Expect a block item node. func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + if !yaml_emitter_increase_indent(emitter, false, false) { return false } } @@ -589,6 +739,9 @@ func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_ emitter.states = emitter.states[:len(emitter.states)-1] return true } + if !yaml_emitter_process_head_comment(emitter) { + return false + } if !yaml_emitter_write_indent(emitter) { return false } @@ -596,7 +749,16 @@ func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_ return false } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + 
} + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true } // Expect a block key node. @@ -606,6 +768,9 @@ func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_ev return false } } + if !yaml_emitter_process_head_comment(emitter) { + return false + } if event.typ == yaml_MAPPING_END_EVENT { emitter.indent = emitter.indents[len(emitter.indents)-1] emitter.indents = emitter.indents[:len(emitter.indents)-1] @@ -616,6 +781,13 @@ func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_ev if !yaml_emitter_write_indent(emitter) { return false } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } if yaml_emitter_check_simple_key(emitter) { emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, true) @@ -641,8 +813,42 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return false } } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. 
+ emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 } // Expect a node. @@ -908,6 +1114,71 @@ func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { panic("unknown scalar style") } +// Write a head comment. 
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + if len(emitter.line_comment) == 0 { + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. +func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + // Check if a %YAML directive is valid. 
func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { if version_directive.major != 1 || version_directive.minor != 1 { @@ -987,6 +1258,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { flow_indicators = false line_breaks = false special_characters = false + tab_characters = false leading_space = false leading_break = false @@ -1055,7 +1327,9 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { } } - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { special_characters = true } if is_space(value, i) { @@ -1110,10 +1384,12 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { emitter.scalar_data.block_plain_allowed = false emitter.scalar_data.single_quoted_allowed = false } - if space_break || special_characters { + if space_break || tab_characters || special_characters { emitter.scalar_data.flow_plain_allowed = false emitter.scalar_data.block_plain_allowed = false emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { emitter.scalar_data.block_allowed = false } if line_breaks { @@ -1137,6 +1413,19 @@ func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bo emitter.tag_data.suffix = nil emitter.scalar_data.value = nil + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + switch event.typ { case yaml_ALIAS_EVENT: if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { @@ -1208,13 +1497,20 @@ func 
yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { return false } } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } for emitter.column < indent { if !put(emitter, ' ') { return false } } emitter.whitespace = true - emitter.indention = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 return true } @@ -1311,7 +1607,7 @@ func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_ } func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { + if len(value) > 0 && !emitter.whitespace { if !put(emitter, ' ') { return false } @@ -1341,7 +1637,7 @@ func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allo if !write_break(emitter, value, &i) { return false } - emitter.indention = true + //emitter.indention = true breaks = true } else { if breaks { @@ -1358,7 +1654,9 @@ func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allo } } - emitter.whitespace = false + if len(value) > 0 { + emitter.whitespace = false + } emitter.indention = false if emitter.root_context { emitter.open_ended = true @@ -1397,7 +1695,7 @@ func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []by if !write_break(emitter, value, &i) { return false } - emitter.indention = true + //emitter.indention = true breaks = true } else { if breaks { @@ -1596,10 +1894,10 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } - emitter.indention = true + //emitter.indention = true emitter.whitespace = true breaks := true for i := 0; i < len(value); { @@ -1607,7 +1905,7 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo if !write_break(emitter, 
value, &i) { return false } - emitter.indention = true + //emitter.indention = true breaks = true } else { if breaks { @@ -1633,11 +1931,11 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } - emitter.indention = true + + //emitter.indention = true emitter.whitespace = true breaks := true @@ -1658,7 +1956,7 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo if !write_break(emitter, value, &i) { return false } - emitter.indention = true + //emitter.indention = true breaks = true } else { if breaks { @@ -1683,3 +1981,40 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo } return true } + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 0000000..de9e72a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, 
reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + 
yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshalled output valid for YAML 1.1 +// parsing. 
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. 
+ switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. 
+ implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. + if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, 
"") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod new file mode 100644 index 0000000..f407ea3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v3" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go similarity index 85% rename from vendor/gopkg.in/yaml.v2/parserc.go rename to vendor/gopkg.in/yaml.v3/parserc.go index 81d05df..268558a 100644 --- a/vendor/gopkg.in/yaml.v2/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission 
notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -45,11 +67,46 @@ import ( // Peek the next token in the token queue. func peek_token(parser *yaml_parser_t) *yaml_token_t { if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token } return nil } +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) 
+ } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + // Remove the next token from the queue (must be called after peek_token). func skip_token(parser *yaml_parser_t) { parser.token_available = false @@ -224,10 +281,32 @@ func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) parser.state = yaml_PARSE_BLOCK_NODE_STATE + var head_comment []byte + if len(parser.head_comment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. + for i := len(parser.head_comment) - 1; i > 0; i-- { + if parser.head_comment[i] == '\n' { + if i == len(parser.head_comment)-1 { + head_comment = parser.head_comment[:i] + parser.head_comment = parser.head_comment[i+1:] + break + } else if parser.head_comment[i-1] == '\n' { + head_comment = parser.head_comment[:i-1] + parser.head_comment = parser.head_comment[i+1:] + break + } + } + } + } + *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, + + head_comment: head_comment, } } else if token.typ != yaml_STREAM_END_TOKEN { @@ -284,6 +363,7 @@ func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event if token == nil { return false } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN || token.typ == yaml_DOCUMENT_START_TOKEN || @@ -327,9 +407,25 @@ func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) end_mark: end_mark, implicit: implicit, } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && 
len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } return true } +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + // Parse the productions: // block_node_or_indentless_sequence ::= // ALIAS @@ -373,6 +469,7 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i end_mark: token.end_mark, anchor: token.value, } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true } @@ -486,6 +583,7 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i quoted_implicit: quoted_implicit, style: yaml_style_t(token.style), } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true } @@ -502,6 +600,7 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), } + yaml_parser_set_event_comments(parser, event) return true } if token.typ == yaml_FLOW_MAPPING_START_TOKEN { @@ -516,6 +615,7 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), } + yaml_parser_set_event_comments(parser, event) return true } if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { @@ -530,6 +630,10 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } return true } if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { @@ -544,6 +648,10 @@ func 
yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } return true } if len(anchor) > 0 || len(tag) > 0 { @@ -579,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -590,7 +701,9 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false @@ -636,7 +749,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false @@ -662,6 +777,32 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y return true } +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. 
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // ******************* @@ -675,6 +816,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -684,6 +828,19 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even return false } + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. 
+ if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + if token.typ == yaml_KEY_TOKEN { mark := token.end_mark skip_token(parser) @@ -709,6 +866,7 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even start_mark: token.start_mark, end_mark: token.end_mark, } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true } @@ -770,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -820,6 +981,7 @@ func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_ev start_mark: token.start_mark, end_mark: token.end_mark, } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true @@ -959,6 +1121,7 @@ func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event start_mark: token.start_mark, end_mark: token.end_mark, } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true } diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go similarity index 91% rename from vendor/gopkg.in/yaml.v2/readerc.go rename to vendor/gopkg.in/yaml.v3/readerc.go index 7c1f5fa..b7de0a8 100644 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software 
without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -95,7 +117,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { // [Go] This function was changed to guarantee the requested length size at EOF. // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests + // for that to be the case, and there are tests // If the EOF flag is set and the raw buffer is empty, do nothing. if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go similarity index 60% rename from vendor/gopkg.in/yaml.v2/resolve.go rename to vendor/gopkg.in/yaml.v3/resolve.go index 4120e0c..64ae888 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v3/resolve.go @@ -1,3 +1,18 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package yaml import ( @@ -34,18 +49,14 @@ func init() { tag string l []string }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, } m := resolveMap @@ -56,11 +67,37 @@ func init() { } } +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" 
+) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + const longTagPrefix = "tag:yaml.org,2002:" func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } return "!!" + tag[len(longTagPrefix):] } return tag @@ -68,6 +105,9 @@ func shortTag(tag string) string { func longTag(tag string) string { if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } return longTagPrefix + tag[2:] } return tag @@ -75,7 +115,7 @@ func longTag(tag string) string { func resolvableTag(tag string) bool { switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: return true } return false @@ -84,23 +124,24 @@ func resolvableTag(tag string) bool { var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) if !resolvableTag(tag) { return tag, in } defer func() { switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + case "", rtag, strTag, binaryTag: return - case yaml_FLOAT_TAG: - if rtag == yaml_INT_TAG { + case floatTag: + if rtag == intTag { switch v := out.(type) { case int64: - rtag = yaml_FLOAT_TAG + rtag = floatTag out = float64(v) return case int: - rtag = yaml_FLOAT_TAG + rtag = floatTag out = float64(v) return } @@ -115,7 +156,7 @@ func resolve(tag string, in string) (rtag string, out interface{}) { if in != "" { hint = resolveTable[in[0]] } - if hint != 0 && tag != yaml_STR_TAG && tag != 
yaml_BINARY_TAG { + if hint != 0 && tag != strTag && tag != binaryTag { // Handle things we can lookup in a map. if item, ok := resolveMap[in]; ok { return item.tag, item.value @@ -133,17 +174,17 @@ func resolve(tag string, in string) (rtag string, out interface{}) { // Not in the map, so maybe a normal float. floatv, err := strconv.ParseFloat(in, 64) if err == nil { - return yaml_FLOAT_TAG, floatv + return floatTag, floatv } case 'D', 'S': // Int, float, or timestamp. // Only try values as a timestamp if the value is unquoted or there's an explicit // !!timestamp tag. - if tag == "" || tag == yaml_TIMESTAMP_TAG { + if tag == "" || tag == timestampTag { t, ok := parseTimestamp(in) if ok { - return yaml_TIMESTAMP_TAG, t + return timestampTag, t } } @@ -151,49 +192,76 @@ func resolve(tag string, in string) (rtag string, out interface{}) { intv, err := strconv.ParseInt(plain, 0, 64) if err == nil { if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) + return intTag, int(intv) } else { - return yaml_INT_TAG, intv + return intTag, intv } } uintv, err := strconv.ParseUint(plain, 0, 64) if err == nil { - return yaml_INT_TAG, uintv + return intTag, uintv } if yamlStyleFloat.MatchString(plain) { floatv, err := strconv.ParseFloat(plain, 64) if err == nil { - return yaml_FLOAT_TAG, floatv + return floatTag, floatv } } if strings.HasPrefix(plain, "0b") { intv, err := strconv.ParseInt(plain[2:], 2, 64) if err == nil { if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) + return intTag, int(intv) } else { - return yaml_INT_TAG, intv + return intTag, intv } } uintv, err := strconv.ParseUint(plain[2:], 2, 64) if err == nil { - return yaml_INT_TAG, uintv + return intTag, uintv } } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + 
// Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) if err == nil { if true || intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) + return intTag, int(intv) } else { - return yaml_INT_TAG, intv + return intTag, intv } } } default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") } } - return yaml_STR_TAG, in + return strTag, in } // encodeBase64 encodes s as base64 that is broken up into multiple lines diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go similarity index 87% rename from vendor/gopkg.in/yaml.v2/scannerc.go rename to vendor/gopkg.in/yaml.v3/scannerc.go index 0b9bb60..ca00701 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to 
the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -489,6 +511,9 @@ func cache(parser *yaml_parser_t, length int) bool { // Advance the buffer pointer. func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } parser.mark.index++ parser.mark.column++ parser.unread-- @@ -502,17 +527,22 @@ func skip_line(parser *yaml_parser_t) { parser.mark.line++ parser.unread -= 2 parser.buffer_pos += 2 + parser.newlines++ } else if is_break(parser.buffer, parser.buffer_pos) { parser.mark.index++ parser.mark.column = 0 parser.mark.line++ parser.unread-- parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ } } // Copy a character to a string buffer and advance pointers. func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } w := width(parser.buffer[parser.buffer_pos]) if w == 0 { panic("invalid character sequence") @@ -564,6 +594,7 @@ func read_line(parser *yaml_parser_t, s []byte) []byte { parser.mark.column = 0 parser.mark.line++ parser.unread-- + parser.newlines++ return s } @@ -626,9 +657,13 @@ func trace(args ...interface{}) func() { func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { // While we need more tokens to fetch, do it. 
for { - if parser.tokens_head != len(parser.tokens) { - // If queue is non-empty, check if any potential simple key may - // occupy the head position. + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] if !ok { break @@ -649,7 +684,7 @@ func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { } // The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { // Ensure that the buffer is initialized. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false @@ -660,13 +695,19 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { return yaml_parser_fetch_stream_start(parser) } + scan_mark := parser.mark + // Eat whitespaces and comments until we reach the next token. if !yaml_parser_scan_to_next_token(parser) { return false } + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + // Check the indentation level against the current column. 
- if !yaml_parser_unroll_indent(parser, parser.mark.column) { + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { return false } @@ -699,6 +740,26 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) } + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + // Is it the flow sequence start indicator? if buf[pos] == '[' { return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) @@ -792,7 +853,7 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { // if it is followed by a non-space character. // // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. + // [Go] TODO Make this logic more reasonable. //switch parser.buffer[parser.buffer_pos] { //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': //} @@ -965,19 +1026,49 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml // Pop indentation levels from the indents stack until the current level // becomes less or equal to the column. For each indentation level, append // the BLOCK-END token. 
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { // In the flow context, do nothing. if parser.flow_level > 0 { return true } + block_mark := scan_mark + block_mark.index-- + // Loop through the indentation levels in the stack. for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + // Create a token and append it to the queue. token := yaml_token_t{ typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, + start_mark: block_mark, + end_mark: block_mark, } yaml_insert_token(parser, -1, &token) @@ -1026,7 +1117,7 @@ func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { } // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { return false } @@ -1050,7 +1141,7 @@ func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { // Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. 
func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { return false } @@ -1074,7 +1165,7 @@ func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { // Produce the DOCUMENT-START or DOCUMENT-END token. func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { return false } @@ -1107,6 +1198,7 @@ func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_ // Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. if !yaml_parser_save_simple_key(parser) { return false @@ -1442,6 +1534,8 @@ func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { // Eat whitespaces and comments until the next token is found. func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + scan_mark := parser.mark + // Until the next token is not found. for { // Allow the BOM mark to start a line. @@ -1468,13 +1562,33 @@ func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { } } + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. 
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + // Eat a comment until a line break. if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } + if !yaml_parser_scan_comments(parser, scan_mark) { + return false } } @@ -1572,6 +1686,10 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool } if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1987,7 +2105,7 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', // '%'. - // [Go] Convert this into more reasonable logic. + // [Go] TODO Convert this into more reasonable logic. for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || @@ -2142,6 +2260,9 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l } } if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -2709,3 +2830,209 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b } return true } + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + 
var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. 
+ seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go similarity index 76% rename from vendor/gopkg.in/yaml.v2/sorter.go rename to vendor/gopkg.in/yaml.v3/sorter.go index 4c45e66..9210ece 100644 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -1,3 +1,18 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package yaml import ( @@ -37,8 +52,10 @@ func (l keyList) Less(i, j int) bool { return ak < bk } ar, br := []rune(a.String()), []rune(b.String()) + digits := false for i := 0; i < len(ar) && i < len(br); i++ { if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) continue } al := unicode.IsLetter(ar[i]) @@ -47,12 +64,16 @@ func (l keyList) Less(i, j int) bool { return ar[i] < br[i] } if al || bl { - return bl + if digits { + return al + } else { + return bl + } } var ai, bi int var an, bn int64 if ar[i] == '0' || br[i] == '0' { - for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { if ar[j] != '0' { an = 1 bn = 1 diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 0000000..b8a116b --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go similarity index 55% rename from vendor/gopkg.in/yaml.v2/yaml.go rename to vendor/gopkg.in/yaml.v3/yaml.go index 3081388..8cec6da 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -1,3 +1,18 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Package yaml implements YAML support for the Go language. 
// // Source code and other details for the project are available at GitHub: @@ -13,23 +28,16 @@ import ( "reflect" "strings" "sync" + "unicode/utf8" ) -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - // The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. +// behavior when being unmarshaled from a YAML document. type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { UnmarshalYAML(unmarshal func(interface{}) error) error } @@ -81,18 +89,10 @@ func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - // A Decoder reads and decodes YAML values from an input stream. type Decoder struct { - strict bool - parser *parser + parser *parser + knownFields bool } // NewDecoder returns a new decoder that reads from r. @@ -105,10 +105,10 @@ func NewDecoder(r io.Reader) *Decoder { } } -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict +// KnownFields ensures that the keys in decoded mappings to +// exist as fields in the struct being decoded into. 
+func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable } // Decode reads the next YAML-encoded value from its input @@ -117,7 +117,8 @@ func (dec *Decoder) SetStrict(strict bool) { // See the documentation for Unmarshal for details about the // conversion of YAML into a Go value. func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder(dec.strict) + d := newDecoder() + d.knownFields = dec.knownFields defer handleErr(&err) node := dec.parser.parse() if node == nil { @@ -134,9 +135,27 @@ func (dec *Decoder) Decode(v interface{}) (err error) { return nil } +// Decode decodes the node and stores its data into the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v interface{}) (err error) { + d := newDecoder() + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(n, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + func unmarshal(in []byte, out interface{}, strict bool) (err error) { defer handleErr(&err) - d := newDecoder(strict) + d := newDecoder() p := newParser(in) defer p.destroy() node := p.parse() @@ -233,6 +252,32 @@ func (e *Encoder) Encode(v interface{}) (err error) { return nil } +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + +// SetIndent changes the used indentation used when encoding. 
+func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.indent = spaces +} + // Close closes the encoder by writing any remaining data. // It does not write a stream terminating string "...". func (e *Encoder) Close() (err error) { @@ -275,6 +320,168 @@ func (e *TypeError) Error() string { return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) } +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode +) + +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// Node represents an element in the YAML document hierarchy. While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. +// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +// +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. 
The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the apperance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. + // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. + // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. + Tag string + + // Value holds the unescaped and unquoted represenation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. + Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is in. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. + Line int + Column int +} + +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + + +// LongTag returns the long form of the tag that indicates the data type for +// the node. 
If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + tag, _ := resolve("", n.Value) + return tag + case 0: + // Special case to make the zero value convenient. + if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + // -------------------------------------------------------------------------- // Maintain a mapping of keys to structure field indexes @@ -289,6 +496,10 @@ type structInfo struct { // InlineMap is the number of the field in the struct that // contains an ,inline map, or -1 if there's none. InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. 
+ InlineUnmarshalers [][]int } type fieldInfo struct { @@ -306,6 +517,12 @@ type fieldInfo struct { var structMap = make(map[reflect.Type]*structInfo) var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} func getStructInfo(st reflect.Type) (*structInfo, error) { fieldMapMutex.RLock() @@ -319,6 +536,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { fieldsMap := make(map[string]fieldInfo) fieldsList := make([]fieldInfo, 0, n) inlineMap := -1 + inlineUnmarshalers := [][]int(nil) for i := 0; i != n; i++ { field := st.Field(i) if field.PkgPath != "" && !field.Anonymous { @@ -347,7 +565,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { case "inline": inline = true default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) } } tag = fields[0] @@ -357,34 +575,47 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { switch field.Type.Kind() { case reflect.Map: if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + return nil, errors.New("multiple ,inline maps in struct " + st.String()) } if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) } inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) + if 
ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) } default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") + return nil, errors.New("option ,inline may only be used on a struct or map field") } continue } @@ -396,7 +627,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { } if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + msg := "duplicated key '" + info.Key + "' in struct " + st.String() return nil, errors.New(msg) } @@ -406,9 +637,10 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { } sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, } fieldMapMutex.Lock() @@ -464,15 +696,3 
@@ func isZero(v reflect.Value) bool { } return false } - -// FutureLineWrap globally disables line wrapping when encoding long strings. -// This is a temporary and thus deprecated method introduced to faciliate -// migration towards v3, which offers more control of line lengths on -// individual encodings, and has a default matching the behavior introduced -// by this function. -// -// The default formatting of v2 was erroneously changed in v2.3.0 and reverted -// in v2.4.0, at which point this function was introduced to help migration. -func FutureLineWrap() { - disableLineWrapping = true -} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go similarity index 88% rename from vendor/gopkg.in/yaml.v2/yamlh.go rename to vendor/gopkg.in/yaml.v3/yamlh.go index f6a9c8e..7c6d007 100644 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -73,13 +95,13 @@ type yaml_scalar_style_t yaml_style_t // Scalar styles. const ( // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. ) type yaml_sequence_style_t yaml_style_t @@ -238,6 +260,7 @@ const ( yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. yaml_MAPPING_START_EVENT // A MAPPING-START event. yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT ) var eventStrings = []string{ @@ -252,6 +275,7 @@ var eventStrings = []string{ yaml_SEQUENCE_END_EVENT: "sequence end", yaml_MAPPING_START_EVENT: "mapping start", yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", } func (e yaml_event_type_t) String() string { @@ -279,6 +303,12 @@ type yaml_event_t struct { // The list of tag directives (for yaml_DOCUMENT_START_EVENT). 
tag_directives []yaml_tag_directive_t + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). anchor []byte @@ -554,6 +584,8 @@ type yaml_parser_t struct { unread int // The number of unread characters in the buffer. + newlines int // The number of line breaks since last non-break/non-blank character + raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. @@ -562,6 +594,17 @@ type yaml_parser_t struct { offset int // The offset of the current position (in bytes). mark yaml_mark_t // The mark of the current position. + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + // Scanner stuff stream_start_produced bool // Have we started to scan the input stream? @@ -595,6 +638,18 @@ type yaml_parser_t struct { document *yaml_document_t // The currently parsed document. } +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + // Emitter Definitions // The prototype of a write handler. @@ -625,8 +680,10 @@ const ( yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. @@ -698,6 +755,9 @@ type yaml_emitter_t struct { indention bool // If the last character was an indentation character (' ', '-', '?', ':')? open_ended bool // If an explicit document end is required? + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + // Anchor analysis. anchor_data struct { anchor []byte // The anchor value. @@ -721,6 +781,14 @@ type yaml_emitter_t struct { style yaml_scalar_style_t // The output style. } + // Comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + key_line_comment []byte + // Dumper stuff opened bool // If the stream was already opened? 
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go similarity index 78% rename from vendor/gopkg.in/yaml.v2/yamlprivateh.go rename to vendor/gopkg.in/yaml.v3/yamlprivateh.go index 8110ce3..e88f9c5 100644 --- a/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml const ( @@ -114,8 +136,9 @@ func is_crlf(b []byte, i int) bool { // Check if the character is a line break or NUL. 
func is_breakz(b []byte, i int) bool { //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) @@ -127,8 +150,9 @@ func is_breakz(b []byte, i int) bool { // Check if the character is a line break, space, or NUL. func is_spacez(b []byte, i int) bool { //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || + return ( + // is_space: + b[i] == ' ' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) @@ -141,8 +165,9 @@ func is_spacez(b []byte, i int) bool { // Check if the character is a line break, space, tab, or NUL. func is_blankz(b []byte, i int) bool { //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) diff --git a/vendor/modules.txt b/vendor/modules.txt index 54e193b..98be46f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,2 +1,12 @@ -# gopkg.in/yaml.v2 v2.4.0 -gopkg.in/yaml.v2 +# github.com/VeritasOS/plugin-manager v1.0.4 +github.com/VeritasOS/plugin-manager +github.com/VeritasOS/plugin-manager/config +github.com/VeritasOS/plugin-manager/graph +github.com/VeritasOS/plugin-manager/types +github.com/VeritasOS/plugin-manager/types/runtime +github.com/VeritasOS/plugin-manager/types/status +github.com/VeritasOS/plugin-manager/utils/log +github.com/VeritasOS/plugin-manager/utils/os +github.com/VeritasOS/plugin-manager/utils/output +# gopkg.in/yaml.v3 v3.0.1 +gopkg.in/yaml.v3