diff --git a/.codespellignore b/.codespellignore new file mode 100644 index 000000000..e2e28306d --- /dev/null +++ b/.codespellignore @@ -0,0 +1,6 @@ +aks +AfterAll +CROs +NotIn +fo +allReady \ No newline at end of file diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 000000000..083f4c4f7 --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,24 @@ +# GitHub Action to automate the identification of common misspellings in text files. +# https://github.com/codespell-project/actions-codespell +# https://github.com/codespell-project/codespell +name: codespell +on: [pull_request] +permissions: + contents: read + +jobs: + codespell: + name: Check for spelling errors + runs-on: ubuntu-latest + steps: + - name: Harden Runner + uses: step-security/harden-runner@91182cccc01eb5e619899d80e4e971d6181294a7 # v2.10.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # master + with: + check_filenames: true + skip: ./.git,./.github/workflows/codespell.yml,.git,*.png,*.jpg,*.svg,*.sum,./vendor,go.sum,testdata + ignore_words_file: .codespellignore diff --git a/apis/cluster/v1/membercluster_types.go b/apis/cluster/v1/membercluster_types.go index d4d11ef8d..2bb600115 100644 --- a/apis/cluster/v1/membercluster_types.go +++ b/apis/cluster/v1/membercluster_types.go @@ -196,7 +196,7 @@ func (m *MemberCluster) RemoveCondition(conditionType string) { // GetAgentStatus retrieves the status of a specific member agent from the MemberCluster object. // -// If the specificed agent does not exist, or it has not updated its status with the hub cluster +// If the specified agent does not exist, or it has not updated its status with the hub cluster // yet, this function returns nil. func (m *MemberCluster) GetAgentStatus(agentType AgentType) *AgentStatus { for _, s := range m.Status.AgentStatus { diff --git a/apis/cluster/v1beta1/membercluster_types.go b/apis/cluster/v1beta1/membercluster_types.go index 1a5417ea4..574e63f03 100644 --- a/apis/cluster/v1beta1/membercluster_types.go +++ b/apis/cluster/v1beta1/membercluster_types.go @@ -197,7 +197,7 @@ func (m *MemberCluster) RemoveCondition(conditionType string) { // GetAgentStatus retrieves the status of a specific member agent from the MemberCluster object. // -// If the specificed agent does not exist, or it has not updated its status with the hub cluster +// If the specified agent does not exist, or it has not updated its status with the hub cluster // yet, this function returns nil. func (m *MemberCluster) GetAgentStatus(agentType AgentType) *AgentStatus { for _, s := range m.Status.AgentStatus { diff --git a/apis/placement/v1/clusterresourceplacement_types.go b/apis/placement/v1/clusterresourceplacement_types.go index 4c0a748db..34679629a 100644 --- a/apis/placement/v1/clusterresourceplacement_types.go +++ b/apis/placement/v1/clusterresourceplacement_types.go @@ -17,7 +17,7 @@ const ( // that the CRP controller can react to CRP deletions if necessary. ClusterResourcePlacementCleanupFinalizer = fleetPrefix + "crp-cleanup" - // SchedulerCRPCleanupFinalizer is a finalizer addd by the scheduler to CRPs, to make sure + // SchedulerCRPCleanupFinalizer is a finalizer added by the scheduler to CRPs, to make sure // that all bindings derived from a CRP can be cleaned up after the CRP is deleted. 
SchedulerCRPCleanupFinalizer = fleetPrefix + "scheduler-cleanup" ) diff --git a/apis/placement/v1/work_types.go b/apis/placement/v1/work_types.go index fa8ce76f2..a20d10aa7 100644 --- a/apis/placement/v1/work_types.go +++ b/apis/placement/v1/work_types.go @@ -58,7 +58,7 @@ type WorkSpec struct { // WorkloadTemplate represents the manifest workload to be deployed on spoke cluster type WorkloadTemplate struct { - // Manifests represents a list of kuberenetes resources to be deployed on the spoke cluster. + // Manifests represents a list of kubernetes resources to be deployed on the spoke cluster. // +optional Manifests []Manifest `json:"manifests,omitempty"` } @@ -75,7 +75,7 @@ type WorkStatus struct { // Conditions contains the different condition statuses for this work. // Valid condition types are: // 1. Applied represents workload in Work is applied successfully on the spoke cluster. - // 2. Progressing represents workload in Work in the trasitioning from one state to another the on the spoke cluster. + // 2. Progressing represents workload in Work in the transitioning from one state to another the on the spoke cluster. // 3. Available represents workload in Work exists on the spoke cluster. // 4. Degraded represents the current state of workload does not match the desired // state for a certain period. @@ -91,7 +91,7 @@ type WorkStatus struct { // Renamed original "ResourceIdentifier" so that it won't conflict with ResourceIdentifier defined in the clusterresourceplacement_types.go. type WorkResourceIdentifier struct { // Ordinal represents an index in manifests list, so the condition can still be linked - // to a manifest even thougth manifest cannot be parsed successfully. + // to a manifest even though manifest cannot be parsed successfully. Ordinal int `json:"ordinal"` // Group is the group of the resource. diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index 01c8b4552..7e6c9aadb 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -17,7 +17,7 @@ const ( // that the CRP controller can react to CRP deletions if necessary. ClusterResourcePlacementCleanupFinalizer = fleetPrefix + "crp-cleanup" - // SchedulerCRPCleanupFinalizer is a finalizer addd by the scheduler to CRPs, to make sure + // SchedulerCRPCleanupFinalizer is a finalizer added by the scheduler to CRPs, to make sure // that all bindings derived from a CRP can be cleaned up after the CRP is deleted. SchedulerCRPCleanupFinalizer = fleetPrefix + "scheduler-cleanup" ) @@ -515,7 +515,7 @@ type ApplyStrategy struct { // performs a client-side apply. This is the default option. // // Note that this strategy requires that Fleet keep the last applied configuration in the - // annoation of an applied resource. If the object gets so large that apply ops can no longer + // annotation of an applied resource. If the object gets so large that apply ops can no longer // be executed, Fleet will switch to server-side apply. // // Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. @@ -531,7 +531,7 @@ type ApplyStrategy struct { // Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. 
// // * ReportDiff: Fleet will compare the desired state of a resource as kept in the hub cluster - // with its current state (if appliable) on the member cluster side, and report any + // with its current state (if applicable) on the member cluster side, and report any // differences. No actual apply ops would be executed, and resources will be left alone as they // are on the member clusters. // diff --git a/apis/placement/v1beta1/work_types.go b/apis/placement/v1beta1/work_types.go index de054ca71..2fc88f2e6 100644 --- a/apis/placement/v1beta1/work_types.go +++ b/apis/placement/v1beta1/work_types.go @@ -57,7 +57,7 @@ type WorkSpec struct { // WorkloadTemplate represents the manifest workload to be deployed on spoke cluster type WorkloadTemplate struct { - // Manifests represents a list of kuberenetes resources to be deployed on the spoke cluster. + // Manifests represents a list of kubernetes resources to be deployed on the spoke cluster. // +optional Manifests []Manifest `json:"manifests,omitempty"` } @@ -74,7 +74,7 @@ type WorkStatus struct { // Conditions contains the different condition statuses for this work. // Valid condition types are: // 1. Applied represents workload in Work is applied successfully on the spoke cluster. - // 2. Progressing represents workload in Work in the trasitioning from one state to another the on the spoke cluster. + // 2. Progressing represents workload in Work in the transitioning from one state to another the on the spoke cluster. // 3. Available represents workload in Work exists on the spoke cluster. // 4. Degraded represents the current state of workload does not match the desired // state for a certain period. @@ -90,7 +90,7 @@ type WorkStatus struct { // Renamed original "ResourceIdentifier" so that it won't conflict with ResourceIdentifier defined in the clusterresourceplacement_types.go. type WorkResourceIdentifier struct { // Ordinal represents an index in manifests list, so the condition can still be linked - // to a manifest even thougth manifest cannot be parsed successfully. + // to a manifest even though manifest cannot be parsed successfully. Ordinal int `json:"ordinal"` // Group is the group of the resource. diff --git a/cmd/hubagent/options/options.go b/cmd/hubagent/options/options.go index 2b2067ece..ceb2f2375 100644 --- a/cmd/hubagent/options/options.go +++ b/cmd/hubagent/options/options.go @@ -108,7 +108,7 @@ func (o *Options) AddFlags(flags *flag.FlagSet) { flags.DurationVar(&o.LeaderElection.LeaseDuration.Duration, "leader-lease-duration", 15*time.Second, "This is effectively the maximum duration that a leader can be stopped before someone else will replace it.") flag.StringVar(&o.LeaderElection.ResourceNamespace, "leader-election-namespace", utils.FleetSystemNamespace, "The namespace in which the leader election resource will be created.") flag.BoolVar(&o.EnableWebhook, "enable-webhook", true, "If set, the fleet webhook is enabled.") - // set a defautl value 'fleetwebhook' for webhook service name for backward compatibility. The service name was hard coded to 'fleetwebhook' in the past. + // set a default value 'fleetwebhook' for webhook service name for backward compatibility. The service name was hard coded to 'fleetwebhook' in the past. 
flag.StringVar(&o.WebhookServiceName, "webhook-service-name", "fleetwebhook", "Fleet webhook service name.") flag.BoolVar(&o.EnableGuardRail, "enable-guard-rail", false, "If set, the fleet guard rail webhook configurations are enabled.") flag.StringVar(&o.WhiteListedUsers, "whitelisted-users", "", "If set, white listed users can modify fleet related resources.") diff --git a/cmd/memberagent/main_test.go b/cmd/memberagent/main_test.go index 6e2cc613d..d729a6fba 100644 --- a/cmd/memberagent/main_test.go +++ b/cmd/memberagent/main_test.go @@ -89,13 +89,13 @@ func Test_buildHubConfig(t *testing.T) { assert.Nil(t, config) assert.NotNil(t, err) }) - t.Run("use token auth, no toke path - error", func(t *testing.T) { + t.Run("use token auth, no token path - error", func(t *testing.T) { t.Setenv("CONFIG_PATH", "") config, err := buildHubConfig("https://hub.domain.com", false, false) assert.Nil(t, config) assert.NotNil(t, err) }) - t.Run("use token auth, not exists toke path - error", func(t *testing.T) { + t.Run("use token auth, not exists token path - error", func(t *testing.T) { t.Setenv("CONFIG_PATH", "/hot/exists/token/path") config, err := buildHubConfig("https://hub.domain.com", false, false) assert.Nil(t, config) diff --git a/config/crd/bases/multicluster.x-k8s.io_appliedworks.yaml b/config/crd/bases/multicluster.x-k8s.io_appliedworks.yaml index 19105743f..6b5430b29 100644 --- a/config/crd/bases/multicluster.x-k8s.io_appliedworks.yaml +++ b/config/crd/bases/multicluster.x-k8s.io_appliedworks.yaml @@ -89,7 +89,7 @@ spec: type: string ordinal: description: Ordinal represents an index in manifests list, - so the condition can still be linked to a manifest even thougth + so the condition can still be linked to a manifest even though manifest cannot be parsed successfully. type: integer resource: diff --git a/config/crd/bases/multicluster.x-k8s.io_works.yaml b/config/crd/bases/multicluster.x-k8s.io_works.yaml index dd4eb47d0..a98e638bd 100644 --- a/config/crd/bases/multicluster.x-k8s.io_works.yaml +++ b/config/crd/bases/multicluster.x-k8s.io_works.yaml @@ -40,7 +40,7 @@ spec: on spoke cluster properties: manifests: - description: Manifests represents a list of kuberenetes resources + description: Manifests represents a list of kubernetes resources to be deployed on the spoke cluster. items: description: Manifest represents a resource to be deployed on @@ -59,7 +59,7 @@ spec: description: 'Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload in Work is applied successfully on the spoke cluster. 2. - Progressing represents workload in Work in the trasitioning from + Progressing represents workload in Work in the transitioning from one state to another the on the spoke cluster. 3. Available represents workload in Work exists on the spoke cluster. 4. Degraded represents the current state of workload does not match the desired state for @@ -233,7 +233,7 @@ spec: ordinal: description: Ordinal represents an index in manifests list, so the condition can still be linked to a manifest even - thougth manifest cannot be parsed successfully. + though manifest cannot be parsed successfully. 
type: integer resource: description: Resource is the resource type of the resource diff --git a/config/crd/bases/placement.kubernetes-fleet.io_appliedworks.yaml b/config/crd/bases/placement.kubernetes-fleet.io_appliedworks.yaml index 940ac0082..126d7a161 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_appliedworks.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_appliedworks.yaml @@ -95,7 +95,7 @@ spec: ordinal: description: |- Ordinal represents an index in manifests list, so the condition can still be linked - to a manifest even thougth manifest cannot be parsed successfully. + to a manifest even though manifest cannot be parsed successfully. type: integer resource: description: Resource is the resource type of the resource @@ -199,7 +199,7 @@ spec: ordinal: description: |- Ordinal represents an index in manifests list, so the condition can still be linked - to a manifest even thougth manifest cannot be parsed successfully. + to a manifest even though manifest cannot be parsed successfully. type: integer resource: description: Resource is the resource type of the resource diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml index 2a09a6ebf..111542c7c 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml @@ -565,7 +565,7 @@ spec: Note that this strategy requires that Fleet keep the last applied configuration in the - annoation of an applied resource. If the object gets so large that apply ops can no longer + annotation of an applied resource. If the object gets so large that apply ops can no longer be executed, Fleet will switch to server-side apply. @@ -585,7 +585,7 @@ spec: * ReportDiff: Fleet will compare the desired state of a resource as kept in the hub cluster - with its current state (if appliable) on the member cluster side, and report any + with its current state (if applicable) on the member cluster side, and report any differences. No actual apply ops would be executed, and resources will be left alone as they are on the member clusters. diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml index 3ef0d05dd..ba2717a3f 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml @@ -1893,7 +1893,7 @@ spec: Note that this strategy requires that Fleet keep the last applied configuration in the - annoation of an applied resource. If the object gets so large that apply ops can no longer + annotation of an applied resource. If the object gets so large that apply ops can no longer be executed, Fleet will switch to server-side apply. @@ -1913,7 +1913,7 @@ spec: * ReportDiff: Fleet will compare the desired state of a resource as kept in the hub cluster - with its current state (if appliable) on the member cluster side, and report any + with its current state (if applicable) on the member cluster side, and report any differences. No actual apply ops would be executed, and resources will be left alone as they are on the member clusters. 
diff --git a/config/crd/bases/placement.kubernetes-fleet.io_works.yaml b/config/crd/bases/placement.kubernetes-fleet.io_works.yaml index fd318bbed..656b5ad92 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_works.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_works.yaml @@ -87,7 +87,7 @@ spec: on spoke cluster properties: manifests: - description: Manifests represents a list of kuberenetes resources + description: Manifests represents a list of kubernetes resources to be deployed on the spoke cluster. items: description: Manifest represents a resource to be deployed on @@ -107,7 +107,7 @@ spec: Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload in Work is applied successfully on the spoke cluster. - 2. Progressing represents workload in Work in the trasitioning from one state to another the on the spoke cluster. + 2. Progressing represents workload in Work in the transitioning from one state to another the on the spoke cluster. 3. Available represents workload in Work exists on the spoke cluster. 4. Degraded represents the current state of workload does not match the desired state for a certain period. @@ -283,7 +283,7 @@ spec: ordinal: description: |- Ordinal represents an index in manifests list, so the condition can still be linked - to a manifest even thougth manifest cannot be parsed successfully. + to a manifest even though manifest cannot be parsed successfully. type: integer resource: description: Resource is the resource type of the resource @@ -473,7 +473,7 @@ spec: Note that this strategy requires that Fleet keep the last applied configuration in the - annoation of an applied resource. If the object gets so large that apply ops can no longer + annotation of an applied resource. If the object gets so large that apply ops can no longer be executed, Fleet will switch to server-side apply. @@ -493,7 +493,7 @@ spec: * ReportDiff: Fleet will compare the desired state of a resource as kept in the hub cluster - with its current state (if appliable) on the member cluster side, and report any + with its current state (if applicable) on the member cluster side, and report any differences. No actual apply ops would be executed, and resources will be left alone as they are on the member clusters. @@ -565,7 +565,7 @@ spec: on spoke cluster properties: manifests: - description: Manifests represents a list of kuberenetes resources + description: Manifests represents a list of kubernetes resources to be deployed on the spoke cluster. items: description: Manifest represents a resource to be deployed on @@ -585,7 +585,7 @@ spec: Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload in Work is applied successfully on the spoke cluster. - 2. Progressing represents workload in Work in the trasitioning from one state to another the on the spoke cluster. + 2. Progressing represents workload in Work in the transitioning from one state to another the on the spoke cluster. 3. Available represents workload in Work exists on the spoke cluster. 4. Degraded represents the current state of workload does not match the desired state for a certain period. @@ -761,7 +761,7 @@ spec: ordinal: description: |- Ordinal represents an index in manifests list, so the condition can still be linked - to a manifest even thougth manifest cannot be parsed successfully. + to a manifest even though manifest cannot be parsed successfully. 
type: integer resource: description: Resource is the resource type of the resource diff --git a/docs/api-references.md b/docs/api-references.md index 23df83c8f..37373f8ba 100644 --- a/docs/api-references.md +++ b/docs/api-references.md @@ -632,7 +632,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `ordinal` _integer_ | Ordinal represents an index in manifests list, so the condition can still be linked to a manifest even thougth manifest cannot be parsed successfully. | +| `ordinal` _integer_ | Ordinal represents an index in manifests list, so the condition can still be linked to a manifest even though manifest cannot be parsed successfully. | | `group` _string_ | Group is the group of the resource. | | `version` _string_ | Version is the version of the resource. | | `kind` _string_ | Kind is the kind of the resource. | @@ -660,7 +660,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#condition-v1-meta) array_ | Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload in Work is applied successfully on the spoke cluster. 2. Progressing represents workload in Work in the trasitioning from one state to another the on the spoke cluster. 3. Available represents workload in Work exists on the spoke cluster. 4. Degraded represents the current state of workload does not match the desired state for a certain period. | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#condition-v1-meta) array_ | Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload in Work is applied successfully on the spoke cluster. 2. Progressing represents workload in Work in the transitioning from one state to another the on the spoke cluster. 3. Available represents workload in Work exists on the spoke cluster. 4. Degraded represents the current state of workload does not match the desired state for a certain period. | | `manifestConditions` _[ManifestCondition](#manifestcondition) array_ | ManifestConditions represents the conditions of each resource in work deployed on spoke cluster. | #### WorkloadTemplate @@ -672,4 +672,4 @@ _Appears in:_ | Field | Description | | --- | --- | -| `manifests` _[Manifest](#manifest) array_ | Manifests represents a list of kuberenetes resources to be deployed on the spoke cluster. | +| `manifests` _[Manifest](#manifest) array_ | Manifests represents a list of kubernetes resources to be deployed on the spoke cluster. | diff --git a/docs/getting-started/on-prem.md b/docs/getting-started/on-prem.md index 12f8e8895..a19c5f815 100644 --- a/docs/getting-started/on-prem.md +++ b/docs/getting-started/on-prem.md @@ -7,7 +7,7 @@ of day-to-day Kubernetes management. > Note > > This tutorial assumes that you have some experience of performing administrative tasks for -> Kubernetes clusters. If you are just gettings started with Kubernetes, or do not have much +> Kubernetes clusters. If you are just getting started with Kubernetes, or do not have much > experience of setting up a Kubernetes cluster, it is recommended that you follow the > [Getting started with Fleet using Kind clusters](kind.md) tutorial instead. 
diff --git a/docs/howtos/taint-toleration.md b/docs/howtos/taint-toleration.md index 232aa1fce..2768020a7 100644 --- a/docs/howtos/taint-toleration.md +++ b/docs/howtos/taint-toleration.md @@ -319,6 +319,6 @@ status: version: v1 ``` -Nothing changes in the status because even if the new taint is not tolerated, the exising resources on the `MemberCluster` +Nothing changes in the status because even if the new taint is not tolerated, the existing resources on the `MemberCluster` will continue to run because the taint effect is `NoSchedule` and the cluster was already selected for resource propagation in a previous scheduling cycle. diff --git a/docs/troubleshooting/clusterResourcePlacementOverridden.md b/docs/troubleshooting/clusterResourcePlacementOverridden.md index 9c830c4f3..d3aecbb18 100644 --- a/docs/troubleshooting/clusterResourcePlacementOverridden.md +++ b/docs/troubleshooting/clusterResourcePlacementOverridden.md @@ -139,7 +139,7 @@ status: ``` The CRP attempted to override a propagated resource utilizing an applicable `ClusterResourceOverrideSnapshot`. However, as the `ClusterResourcePlacementOverridden` condition remains false, looking at the placement status for the cluster -where the condition `Overriden` failed will offer insights into the exact cause of the failure. +where the condition `Overridden` failed will offer insights into the exact cause of the failure. In this situation, the message indicates that the override failed because the path `/metadata/labels/new-label` and its corresponding value are missing. Based on the previous example of the cluster role `secret-reader`, you can see that the path `/metadata/labels/` doesn't exist. This means that `labels` doesn't exist. diff --git a/docs/troubleshooting/clusterResourcePlacementWorkSynchronized.md b/docs/troubleshooting/clusterResourcePlacementWorkSynchronized.md index 7e2e11270..b933e00bf 100644 --- a/docs/troubleshooting/clusterResourcePlacementWorkSynchronized.md +++ b/docs/troubleshooting/clusterResourcePlacementWorkSynchronized.md @@ -6,7 +6,7 @@ The `ClusterResourcePlacementWorkSynchronized` condition is false when the CRP h ## Common Scenarios: Instances where this condition may arise: - The controller encounters an error while trying to generate the corresponding `work` object. -- The enveloped object is not well formated. +- The enveloped object is not well formatted. ### Case Study: The CRP is attempting to propagate a resource to a selected cluster, but the work object has not been updated to reflect the latest changes due to the selected cluster has been terminated. 
@@ -92,7 +92,7 @@ status: status: "True" type: Overridden - lastTransitionTime: "2024-05-14T18:05:05Z" - message: 'Failed to sychronize the work to the latest: works.placement.kubernetes-fleet.io + message: 'Failed to synchronize the work to the latest: works.placement.kubernetes-fleet.io "crp1-work" is forbidden: unable to create new content in namespace fleet-member-kind-cluster-1 because it is being terminated' observedGeneration: 1 diff --git a/examples/ngnix/config-ro.yaml b/examples/nginx/config-ro.yaml similarity index 100% rename from examples/ngnix/config-ro.yaml rename to examples/nginx/config-ro.yaml diff --git a/examples/ngnix/configMap.yaml b/examples/nginx/configMap.yaml similarity index 100% rename from examples/ngnix/configMap.yaml rename to examples/nginx/configMap.yaml diff --git a/examples/ngnix/deployment.yaml b/examples/nginx/deployment.yaml similarity index 100% rename from examples/ngnix/deployment.yaml rename to examples/nginx/deployment.yaml diff --git a/examples/ngnix/namespace.yaml b/examples/nginx/namespace.yaml similarity index 100% rename from examples/ngnix/namespace.yaml rename to examples/nginx/namespace.yaml diff --git a/examples/ngnix/place-all-crp.yaml b/examples/nginx/place-all-crp.yaml similarity index 100% rename from examples/ngnix/place-all-crp.yaml rename to examples/nginx/place-all-crp.yaml diff --git a/examples/ngnix/service.yaml b/examples/nginx/service.yaml similarity index 100% rename from examples/ngnix/service.yaml rename to examples/nginx/service.yaml diff --git a/hack/Azure/setup/README.md b/hack/Azure/setup/README.md index 0a9abceaf..f0f8a72b6 100644 --- a/hack/Azure/setup/README.md +++ b/hack/Azure/setup/README.md @@ -23,7 +23,7 @@ This how-to guide describes how to create a fleet using Azure Kubernetes Service ## Create a hub cluster from an AKS Cluster For your convenience, Fleet provides a script that can automate the process of creating a hub cluster. To use script, -run the commands bellow: +run the commands below: ```sh # Replace the value of with your Azure subscription ID. 
export SUB= diff --git a/pkg/authtoken/providers/secret/k8s_secret.go b/pkg/authtoken/providers/secret/k8s_secret.go index 850e32fa0..5444adaf7 100644 --- a/pkg/authtoken/providers/secret/k8s_secret.go +++ b/pkg/authtoken/providers/secret/k8s_secret.go @@ -32,7 +32,7 @@ type secretAuthTokenProvider struct { func New(secretName, namespace string) (interfaces.AuthTokenProvider, error) { client, err := getClient() if err != nil { - return nil, fmt.Errorf("an error occurd will creating client: %w", err) + return nil, fmt.Errorf("an error occurred while creating client: %w", err) } return &secretAuthTokenProvider{ client: client, diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/clusterresourceplacement/resource_selector.go index fd88d1af5..94225683a 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector.go +++ b/pkg/controllers/clusterresourceplacement/resource_selector.go @@ -183,7 +183,7 @@ func (r *Reconciler) fetchClusterScopedResources(selector fleetv1beta1.ClusterRe var selectedObjs []runtime.Object objects, err := lister.List(labelSelector) if err != nil { - return nil, controller.NewAPIServerError(true, fmt.Errorf("cannot list all the objets: %w", err)) + return nil, controller.NewAPIServerError(true, fmt.Errorf("cannot list all the objects: %w", err)) } // go ahead and claim all objects by adding a finalizer and insert the placement in its annotation for i := 0; i < len(objects); i++ { @@ -366,7 +366,7 @@ func generateRawContent(object *unstructured.Unstructured) ([]byte, error) { } } if err != nil { - return nil, fmt.Errorf("failed to get the ports field in Serivce object, name =%s: %w", object.GetName(), err) + return nil, fmt.Errorf("failed to get the ports field in Service object, name =%s: %w", object.GetName(), err) } } else if object.GetKind() == "Job" && object.GetAPIVersion() == batchv1.SchemeGroupVersion.String() { if manualSelector, exist, _ := unstructured.NestedBool(object.Object, "spec", "manualSelector"); !exist || !manualSelector { diff --git a/pkg/controllers/internalmembercluster/v1beta1/member_controller.go b/pkg/controllers/internalmembercluster/v1beta1/member_controller.go index 0fd8ad547..dfad1c4b5 100644 --- a/pkg/controllers/internalmembercluster/v1beta1/member_controller.go +++ b/pkg/controllers/internalmembercluster/v1beta1/member_controller.go @@ -356,7 +356,7 @@ func (r *Reconciler) connectToPropertyProvider(ctx context.Context, imc *cluster return nil } -// reportPropertyProviderCollectionCondition reports the condition of whether a properity +// reportPropertyProviderCollectionCondition reports the condition of whether a property // collection attempt has been successful. 
func reportPropertyProviderCollectionCondition(imc *clusterv1beta1.InternalMemberCluster, status metav1.ConditionStatus, reason, message string) { meta.SetStatusCondition(&imc.Status.Conditions, metav1.Condition{ diff --git a/pkg/controllers/memberclusterplacement/membercluster_controller.go b/pkg/controllers/memberclusterplacement/membercluster_controller.go index 754c1df4b..f7a373105 100644 --- a/pkg/controllers/memberclusterplacement/membercluster_controller.go +++ b/pkg/controllers/memberclusterplacement/membercluster_controller.go @@ -123,7 +123,7 @@ func matchPlacement(placement *fleetv1alpha1.ClusterResourcePlacement, memberClu s, err := metav1.LabelSelectorAsSelector(&clusterSelector.LabelSelector) if err != nil { // should not happen after we have webhooks - klog.ErrorS(err, "found a mal-formated placement", "placement", placementObj, "selector", clusterSelector.LabelSelector) + klog.ErrorS(err, "found a mal-formatted placement", "placement", placementObj, "selector", clusterSelector.LabelSelector) continue } if s.Matches(labels.Set(memberCluster.GetLabels())) { diff --git a/pkg/controllers/work/applied_work_syncer_test.go b/pkg/controllers/work/applied_work_syncer_test.go index 879d95027..2004f1e80 100644 --- a/pkg/controllers/work/applied_work_syncer_test.go +++ b/pkg/controllers/work/applied_work_syncer_test.go @@ -303,10 +303,10 @@ func TestDeleteStaleManifest(t *testing.T) { gotErr := r.deleteStaleManifest(context.Background(), tt.staleManifests, tt.owner) if tt.wantErr == nil { if gotErr != nil { - t.Errorf("test case `%s` didn't return the exepected error, want no error, got error = %+v ", name, gotErr) + t.Errorf("test case `%s` didn't return the expected error, want no error, got error = %+v ", name, gotErr) } } else if gotErr == nil || gotErr.Error() != tt.wantErr.Error() { - t.Errorf("test case `%s` didn't return the exepected error, want error = %+v, got error = %+v", name, tt.wantErr, gotErr) + t.Errorf("test case `%s` didn't return the expected error, want error = %+v, got error = %+v", name, tt.wantErr, gotErr) } }) } diff --git a/pkg/controllers/work/apply_controller.go b/pkg/controllers/work/apply_controller.go index 073051903..0b49e96a9 100644 --- a/pkg/controllers/work/apply_controller.go +++ b/pkg/controllers/work/apply_controller.go @@ -307,7 +307,7 @@ func (r *ApplyWorkReconciler) garbageCollectAppliedWork(ctx context.Context, wor return ctrl.Result{}, r.client.Update(ctx, work, &client.UpdateOptions{}) } -// ensureAppliedWork makes sure that an associated appliedWork and a finalizer on the work resource exsits on the cluster. +// ensureAppliedWork makes sure that an associated appliedWork and a finalizer on the work resource exists on the cluster. 
func (r *ApplyWorkReconciler) ensureAppliedWork(ctx context.Context, work *fleetv1beta1.Work) (*fleetv1beta1.AppliedWork, error) { workRef := klog.KObj(work) appliedWork := &fleetv1beta1.AppliedWork{} diff --git a/pkg/controllers/work/apply_controller_test.go b/pkg/controllers/work/apply_controller_test.go index a2424c071..5cd2715d4 100644 --- a/pkg/controllers/work/apply_controller_test.go +++ b/pkg/controllers/work/apply_controller_test.go @@ -887,7 +887,7 @@ func TestTrackResourceAvailability(t *testing.T) { expected ApplyAction err error }{ - "Test a mal-formated object": { + "Test a mal-formatted object": { gvr: utils.DeploymentGVR, obj: &unstructured.Unstructured{ Object: map[string]interface{}{ diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index a74ba521e..97bb4720f 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -185,7 +185,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req controllerruntime.Reques Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), Reason: condition.SyncWorkFailedReason, - Message: fmt.Sprintf("Failed to sychronize the work to the latest: %s", errorMessage), + Message: fmt.Sprintf("Failed to synchronize the work to the latest: %s", errorMessage), ObservedGeneration: resourceBinding.Generation, }) } @@ -326,7 +326,7 @@ func (r *Reconciler) ensureFinalizer(ctx context.Context, resourceBinding client // functionally correct, might trigger the work queue rate limiter and eventually lead to // substantial delays in processing. // - // Also note that here default backoff strategy (exponetial backoff) rather than the Kubernetes' + // Also note that here default backoff strategy (exponential backoff) rather than the Kubernetes' // recommended on-write-conflict backoff strategy is used, as experimentation suggests that // this backoff strategy yields better performance, especially for the long-tail latencies. // @@ -891,7 +891,7 @@ func (r *Reconciler) SetupWithManager(mgr controllerruntime.Manager) error { // delete the corresponding resource binding fast. DeleteFunc: func(ctx context.Context, evt event.DeleteEvent, queue workqueue.RateLimitingInterface) { if evt.Object == nil { - klog.ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("deleteEvent %v received with no matadata", evt)), + klog.ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("deleteEvent %v received with no metadata", evt)), "Failed to process a delete event for work object") return } @@ -911,7 +911,7 @@ func (r *Reconciler) SetupWithManager(mgr controllerruntime.Manager) error { // update the corresponding resource binding status fast. 
UpdateFunc: func(ctx context.Context, evt event.UpdateEvent, queue workqueue.RateLimitingInterface) { if evt.ObjectOld == nil || evt.ObjectNew == nil { - klog.ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("updateEvent %v received with no matadata", evt)), + klog.ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("updateEvent %v received with no metadata", evt)), "Failed to process an update event for work object") return } diff --git a/pkg/controllers/workv1alpha1/applied_work_syncer_test.go b/pkg/controllers/workv1alpha1/applied_work_syncer_test.go index 832a84ce5..eda9df8b5 100644 --- a/pkg/controllers/workv1alpha1/applied_work_syncer_test.go +++ b/pkg/controllers/workv1alpha1/applied_work_syncer_test.go @@ -303,10 +303,10 @@ func TestDeleteStaleManifest(t *testing.T) { gotErr := r.deleteStaleManifest(context.Background(), tt.staleManifests, tt.owner) if tt.wantErr == nil { if gotErr != nil { - t.Errorf("test case `%s` didn't return the exepected error, want no error, got error = %+v ", name, gotErr) + t.Errorf("test case `%s` didn't return the expected error, want no error, got error = %+v ", name, gotErr) } } else if gotErr == nil || gotErr.Error() != tt.wantErr.Error() { - t.Errorf("test case `%s` didn't return the exepected error, want error = %+v, got error = %+v", name, tt.wantErr, gotErr) + t.Errorf("test case `%s` didn't return the expected error, want error = %+v, got error = %+v", name, tt.wantErr, gotErr) } }) } diff --git a/pkg/controllers/workv1alpha1/apply_controller.go b/pkg/controllers/workv1alpha1/apply_controller.go index 21b9da985..41c399d5a 100644 --- a/pkg/controllers/workv1alpha1/apply_controller.go +++ b/pkg/controllers/workv1alpha1/apply_controller.go @@ -240,7 +240,7 @@ func (r *ApplyWorkReconciler) garbageCollectAppliedWork(ctx context.Context, wor return ctrl.Result{}, r.client.Update(ctx, work, &client.UpdateOptions{}) } -// ensureAppliedWork makes sure that an associated appliedWork and a finalizer on the work resource exsits on the cluster. +// ensureAppliedWork makes sure that an associated appliedWork and a finalizer on the work resource exists on the cluster. func (r *ApplyWorkReconciler) ensureAppliedWork(ctx context.Context, work *workv1alpha1.Work) (*workv1alpha1.AppliedWork, error) { workRef := klog.KObj(work) appliedWork := &workv1alpha1.AppliedWork{} diff --git a/pkg/scheduler/clustereligibilitychecker/checker.go b/pkg/scheduler/clustereligibilitychecker/checker.go index dd7f69d27..0d58fd8ef 100644 --- a/pkg/scheduler/clustereligibilitychecker/checker.go +++ b/pkg/scheduler/clustereligibilitychecker/checker.go @@ -120,7 +120,7 @@ func (checker *ClusterEligibilityChecker) IsEligible(cluster *clusterv1beta1.Mem // // Note that here no generation check is performed, as // a) the member cluster object spec is most of the time not touched after creation; and - // b) as long as the heartbeat signal does not timeout, a little drift in genrations + // b) as long as the heartbeat signal does not timeout, a little drift in generations // should not exclude a cluster from resource scheduling. 
return false, "cluster is not connected to the fleet: member agent not joined yet" } diff --git a/pkg/scheduler/framework/framework.go b/pkg/scheduler/framework/framework.go index f8c4c22d6..1d2ca26ca 100644 --- a/pkg/scheduler/framework/framework.go +++ b/pkg/scheduler/framework/framework.go @@ -96,7 +96,7 @@ type framework struct { // uncachedReader is the uncached read-only client in use by the scheduler framework for accessing // Kubernetes API server; in most cases client should be used instead, unless consistency becomes // a serious concern. - // TO-DO (chenyu1): explore the possbilities of using a mutation cache for better performance. + // TO-DO (chenyu1): explore the possibilities of using a mutation cache for better performance. uncachedReader client.Reader // manager is the controller manager in use by the scheduler framework. manager ctrl.Manager @@ -263,9 +263,9 @@ func (f *framework) RunSchedulingCycleFor(ctx context.Context, crpName string, p // pods to nodes, is more expensive, it is better to avoid over-scheduling in the first place. // // This, of course, has additional performance overhead (and may further exacerbate API server - // overloading). In the long run we might still want to resort to a cached situtation. + // overloading). In the long run we might still want to resort to a cached situation. // - // TO-DO (chenyu1): explore the possbilities of using a mutation cache for better performance. + // TO-DO (chenyu1): explore the possibilities of using a mutation cache for better performance. bindings, err := f.collectBindings(ctx, crpName) if err != nil { klog.ErrorS(err, "Failed to collect bindings", "clusterSchedulingPolicySnapshot", policyRef) @@ -731,7 +731,7 @@ func (f *framework) updatePolicySnapshotStatusFromBindings( // Retrieve the corresponding CRP generation. observedCRPGeneration, err := annotations.ExtractObservedCRPGenerationFromPolicySnapshot(policy) if err != nil { - klog.ErrorS(err, "Failed to retrieve CRP generation from annoation", "clusterSchedulingPolicySnapshot", policyRef) + klog.ErrorS(err, "Failed to retrieve CRP generation from annotation", "clusterSchedulingPolicySnapshot", policyRef) return controller.NewUnexpectedBehaviorError(err) } diff --git a/pkg/scheduler/framework/framework_test.go b/pkg/scheduler/framework/framework_test.go index e1be90892..aeed4174e 100644 --- a/pkg/scheduler/framework/framework_test.go +++ b/pkg/scheduler/framework/framework_test.go @@ -137,7 +137,7 @@ var ( } ) -// TO-DO (chenyu1): expand the test cases as development stablizes. +// TO-DO (chenyu1): expand the test cases as development stabilizes. // TestMain sets up the test environment. func TestMain(m *testing.M) { @@ -3919,7 +3919,7 @@ func TestRunPostBatchPlugins(t *testing.T) { wantBatchLimit: 1, }, { - name: "multple plugins, one success, one error", + name: "multiple plugins, one success, one error", postBatchPlugins: []PostBatchPlugin{ &DummyAllPurposePlugin{ name: dummyPostBatchPluginNameA, diff --git a/pkg/scheduler/framework/frameworkutils.go b/pkg/scheduler/framework/frameworkutils.go index cf5caeda8..ac8f26cd9 100644 --- a/pkg/scheduler/framework/frameworkutils.go +++ b/pkg/scheduler/framework/frameworkutils.go @@ -40,7 +40,7 @@ func classifyBindings(policy *placementv1beta1.ClusterSchedulingPolicySnapshot, unscheduled = make([]*placementv1beta1.ClusterResourceBinding, 0, len(bindings)) dangling = make([]*placementv1beta1.ClusterResourceBinding, 0, len(bindings)) - // Build a map for clusters for quick loopup. 
+ // Build a map for clusters for quick lookup. clusterMap := make(map[string]clusterv1beta1.MemberCluster) for _, cluster := range clusters { clusterMap[cluster.Name] = cluster @@ -218,7 +218,7 @@ func patchBindingFromScoredCluster(binding *placementv1beta1.ClusterResourceBind updated := binding.DeepCopy() affinityScore := int32(scored.Score.AffinityScore) topologySpreadScore := int32(scored.Score.TopologySpreadScore) - // Update the binding so that it is associated with the lastest scheduling policy. + // Update the binding so that it is associated with the latest scheduling policy. updated.Spec.State = desiredState updated.Spec.SchedulingPolicySnapshotName = policy.Name // copy the scheduling decision @@ -243,7 +243,7 @@ func patchBindingFromFixedCluster(binding *placementv1beta1.ClusterResourceBindi clusterName string, policy *placementv1beta1.ClusterSchedulingPolicySnapshot) *bindingWithPatch { // Update the binding so that it is associated with the latest score. updated := binding.DeepCopy() - // Update the binding so that it is associated with the lastest scheduling policy. + // Update the binding so that it is associated with the latest scheduling policy. updated.Spec.State = desiredState updated.Spec.SchedulingPolicySnapshotName = policy.Name // Technically speaking, overwriting the cluster decision is not needed, as the same value diff --git a/pkg/scheduler/framework/plugins/clusteraffinity/filtering.go b/pkg/scheduler/framework/plugins/clusteraffinity/filtering.go index 4e210a602..5d7f2ffc7 100644 --- a/pkg/scheduler/framework/plugins/clusteraffinity/filtering.go +++ b/pkg/scheduler/framework/plugins/clusteraffinity/filtering.go @@ -58,7 +58,7 @@ func (p *Plugin) Filter( // The cluster matches with the required affinity term; mark it as eligible for // resource placement. // - // Note that when there are mulitiple cluster selector terms, the results are OR'd. + // Note that when there are multiple cluster selector terms, the results are OR'd. return nil } } diff --git a/pkg/scheduler/framework/plugins/clusteraffinity/types.go b/pkg/scheduler/framework/plugins/clusteraffinity/types.go index 210bfd812..911bea26c 100644 --- a/pkg/scheduler/framework/plugins/clusteraffinity/types.go +++ b/pkg/scheduler/framework/plugins/clusteraffinity/types.go @@ -63,7 +63,7 @@ func retrieveResourceUsageFrom(cluster *clusterv1beta1.MemberCluster, name strin if !found { // The property concerns a resource that is not present in the resource usage data. // - // It cound be that the resource is not available in the cluster; consequently Fleet + // It could be that the resource is not available in the cluster; consequently Fleet // does not consider this as an error. return nil, nil } diff --git a/pkg/scheduler/framework/plugins/topologyspreadconstraints/utils.go b/pkg/scheduler/framework/plugins/topologyspreadconstraints/utils.go index 2c9ab01f6..9934fb8cb 100644 --- a/pkg/scheduler/framework/plugins/topologyspreadconstraints/utils.go +++ b/pkg/scheduler/framework/plugins/topologyspreadconstraints/utils.go @@ -274,7 +274,7 @@ func evaluateAllConstraints( } if violated { // A violation happens; since this is a ScheduleAnyway topology spread constraint, - // a violation score penality is applied to the score. + // a violation score penalty is applied to the score. 
scores[clusterName(cluster.Name)] -= maxSkewViolationPenality continue } diff --git a/pkg/scheduler/framework/uniquename/uniquename.go b/pkg/scheduler/framework/uniquename/uniquename.go index baf63c3c4..2e0ae4f40 100644 --- a/pkg/scheduler/framework/uniquename/uniquename.go +++ b/pkg/scheduler/framework/uniquename/uniquename.go @@ -40,7 +40,7 @@ func minInt(a, b int) int { // In addition, note that this function assumes that both the CRP name and the cluster name // are valid DNS label names (RFC 1123). func NewClusterResourceBindingName(CRPName string, clusterName string) (string, error) { - reservedSlots := 2 + uuidLength // 2 dashs + 8 character UUID string + reservedSlots := 2 + uuidLength // 2 dashes + 8 character UUID string slotsPerSeg := (validation.DNS1123LabelMaxLength - reservedSlots) / 2 uniqueName := fmt.Sprintf("%s-%s-%s", diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 471020de4..60ee67f3c 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -53,7 +53,7 @@ type Scheduler struct { // Kubernetes API server; in most cases client should be used instead, unless consistency becomes // a serious concern. // - // TO-DO (chenyu1): explore the possbilities of using a mutation cache for better performance. + // TO-DO (chenyu1): explore the possibilities of using a mutation cache for better performance. uncachedReader client.Reader // manager is the controller manager in use by the scheduler. diff --git a/test/e2e/scheduler_watchers_test.go b/test/e2e/scheduler_watchers_test.go index 71cfce98b..ed95ec4be 100644 --- a/test/e2e/scheduler_watchers_test.go +++ b/test/e2e/scheduler_watchers_test.go @@ -501,7 +501,7 @@ var _ = Describe("responding to specific member cluster changes", func() { Expect(hubClient.Create(ctx, crp)).To(Succeed()) }) - It("should propgate works for the new cluster; can mark them as applied", func() { + It("should propagate works for the new cluster; can mark them as applied", func() { verifyWorkPropagationAndMarkAsAvailable(fakeClusterName1ForWatcherTests, crpName, workResourceIdentifiers()) }) diff --git a/test/e2e/v1alpha1/utils/workload_test_utils.go b/test/e2e/v1alpha1/utils/workload_test_utils.go index a38aadd4d..2a78ed00a 100644 --- a/test/e2e/v1alpha1/utils/workload_test_utils.go +++ b/test/e2e/v1alpha1/utils/workload_test_utils.go @@ -101,7 +101,7 @@ func WaitCreateClusterResourcePlacementStatus(ctx context.Context, cluster frame return err } if statusDiff := cmp.Diff(wantCRPStatus, gotCRP.Status, crpStatusCmpOptions...); statusDiff != "" { - return fmt.Errorf("cluster resource placment(%s) status mismatch (-want +got):\n%s", gotCRP.Name, statusDiff) + return fmt.Errorf("cluster resource placement(%s) status mismatch (-want +got):\n%s", gotCRP.Name, statusDiff) } return nil }, customTimeout, PollInterval).Should(gomega.Succeed(), "Failed to wait for cluster resource placement %s status to be updated", gotCRP.Name, cluster.ClusterName) diff --git a/test/e2e/v1alpha1/webhook_test.go b/test/e2e/v1alpha1/webhook_test.go index 70dd42441..7c7675d7a 100644 --- a/test/e2e/v1alpha1/webhook_test.go +++ b/test/e2e/v1alpha1/webhook_test.go @@ -1249,7 +1249,7 @@ var _ = Describe("Fleet's Reserved Namespace Handler fleet network tests", Order return err } var statusErr *k8sErrors.StatusError - g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update Internal Serivce Export call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) + g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update Internal Service Export call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) g.Expect(statusErr.Status().Message).Should(ContainSubstring(fmt.Sprintf(validation.ResourceDeniedFormat, testUser, utils.GenerateGroupString(testGroups), admissionv1.Update, &iseGVK, "", types.NamespacedName{Name: ise.Name, Namespace: ise.Namespace}))) return nil }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) diff --git a/test/e2e/v1alpha1/work_api_e2e_test.go b/test/e2e/v1alpha1/work_api_e2e_test.go index 5733e1e20..11671413b 100644 --- a/test/e2e/v1alpha1/work_api_e2e_test.go +++ b/test/e2e/v1alpha1/work_api_e2e_test.go @@ -141,7 +141,7 @@ var _ = Describe("Work API Controller test", func() { return cmp.Diff(want, work.Status.Conditions, cmpOptions...) }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got):") - By(fmt.Sprintf("Manifest Condiitons on Work Objects %s should be applied", namespaceType)) + By(fmt.Sprintf("Manifest Conditions on Work Objects %s should be applied", namespaceType)) wantManifestCondition := []workapi.ManifestCondition{ { Conditions: []metav1.Condition{ @@ -279,7 +279,7 @@ var _ = Describe("Work API Controller test", func() { return cmp.Diff(want, workTwo.Status.Conditions, cmpOptions...) }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got):") - By(fmt.Sprintf("Manifest Condiitons on Work Objects %s and %s should be applied", namespaceTypeOne, namespaceTypeTwo)) + By(fmt.Sprintf("Manifest Conditions on Work Objects %s and %s should be applied", namespaceTypeOne, namespaceTypeTwo)) wantManifestCondition := []workapi.ManifestCondition{ { Conditions: []metav1.Condition{ @@ -425,7 +425,7 @@ var _ = Describe("Work API Controller test", func() { }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Applied Condition mismatch for work %s (-want, +got):", workName) - By(fmt.Sprintf("Manifest Condiitons on Work Objects %s should be applied", namespaceType)) + By(fmt.Sprintf("Manifest Conditions on Work Objects %s should be applied", namespaceType)) expectedManifestCondition := []workapi.ManifestCondition{ { Conditions: []metav1.Condition{ @@ -521,7 +521,7 @@ var _ = Describe("Work API Controller test", func() { }, } - Expect(cmp.Diff(wantCRD, crd, crdCmpOptions...)).Should(BeEmpty(), "Valdate CRD object mismatch (-want, got+):") + Expect(cmp.Diff(wantCRD, crd, crdCmpOptions...)).Should(BeEmpty(), "Validate CRD object mismatch (-want, got+):") By(fmt.Sprintf("CR %s should have been created in cluster %s", crdObjectName, MemberCluster.ClusterName)) @@ -634,7 +634,7 @@ var _ = Describe("Work API Controller test", func() { }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus for work %s mismatch (-want, +got):", workForServiceAccount) - By(fmt.Sprintf("Manifest Condiitons on Work Objects %s and %s should be applied", namespaceTypeForNamespaceWork, namespaceTypeForServiceAccountWork)) + By(fmt.Sprintf("Manifest Conditions on Work Objects %s and %s should be applied", namespaceTypeForNamespaceWork, namespaceTypeForServiceAccountWork)) wantNamespaceManifestCondition := []workapi.ManifestCondition{ { Conditions: []metav1.Condition{ diff --git a/test/integration/cluster_placement_test.go 
b/test/integration/cluster_placement_test.go index a2aa5bb74..89a02243e 100644 --- a/test/integration/cluster_placement_test.go +++ b/test/integration/cluster_placement_test.go @@ -45,7 +45,7 @@ var _ = Describe("Test Cluster Resource Placement Controller", func() { BeforeEach(func() { By("Create member cluster A ") - // create a new cluster everytime since namespace deletion doesn't work in testenv + // create a new cluster every time since namespace deletion doesn't work in testenv clusterA = fleetv1alpha1.MemberCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-a-" + utilrand.String(8), diff --git a/test/scheduler/pickn_integration_test.go b/test/scheduler/pickn_integration_test.go index 8d4dccbbc..8e52f9bfd 100644 --- a/test/scheduler/pickn_integration_test.go +++ b/test/scheduler/pickn_integration_test.go @@ -1549,7 +1549,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { // // Normally upscaling is done by increasing the number of clusters field in the CRP; // however, since in the integration test environment, CRP controller is not available, - // we directly manipulate the number of clusters annoation on the policy snapshot + // we directly manipulate the number of clusters annotation on the policy snapshot // to trigger upscaling. Eventually(func() error { policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{} @@ -1640,7 +1640,7 @@ var _ = Describe("scheduling CRPs of the PickN placement type", func() { // // Normally downscaling is done by increasing the number of clusters field in the CRP; // however, since in the integration test environment, CRP controller is not available, - // we directly manipulate the number of clusters annoation on the policy snapshot + // we directly manipulate the number of clusters annotation on the policy snapshot // to trigger downscaling. Eventually(func() error { policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{}