From d6fa59e9cb6e55664d20c431f9287e6cc877c42e Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 26 Jun 2020 17:03:57 +0200 Subject: [PATCH 1/8] storage capacity: initial implementation This is the producer side of KEP https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1472-storage-capacity-tracking. Only deployment together with a central controller is currently implemented. When syncing directly whenever there is a change, there's potentially a larger number of changes emitted. When there are rapid changes (for example, while a driver gets deployed), it may be better to delay processing and thus combine multiple changes in a single sync. --- README.md | 96 +- cmd/csi-provisioner/csi-provisioner.go | 75 +- deploy/kubernetes/rbac.yaml | 8 + go.sum | 1 + pkg/capacity/capacity.go | 574 +++++++++ pkg/capacity/capacity_test.go | 1121 +++++++++++++++++ pkg/capacity/doc.go | 19 + pkg/capacity/features.go | 77 ++ pkg/capacity/features_test.go | 81 ++ pkg/capacity/topology/doc.go | 21 + pkg/capacity/topology/nodes.go | 288 +++++ pkg/capacity/topology/nodes_test.go | 633 ++++++++++ pkg/capacity/topology/topology.go | 137 ++ .../k8s.io/apimachinery/pkg/util/rand/rand.go | 127 ++ vendor/modules.txt | 1 + 15 files changed, 3257 insertions(+), 2 deletions(-) create mode 100644 pkg/capacity/capacity.go create mode 100644 pkg/capacity/capacity_test.go create mode 100644 pkg/capacity/doc.go create mode 100644 pkg/capacity/features.go create mode 100644 pkg/capacity/features_test.go create mode 100644 pkg/capacity/topology/doc.go create mode 100644 pkg/capacity/topology/nodes.go create mode 100644 pkg/capacity/topology/nodes_test.go create mode 100644 pkg/capacity/topology/topology.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/rand/rand.go diff --git a/README.md b/README.md index 213eef3633..06f9c39ba4 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ Following table reflects the head of this branch. | -------------- | ------- | ------- | --------------------------------------------------------------------------------------------- | --------------------------------- | | Snapshots | Beta | On | [Snapshots and Restore](https://kubernetes-csi.github.io/docs/snapshot-restore-feature.html). | No | | CSIMigration | Beta | On | [Migrating in-tree volume plugins to CSI](https://kubernetes.io/docs/concepts/storage/volumes/#csi-migration). | No | +| CSIStorageCapacity | Alpha | Off | Publish [capacity information](https://kubernetes.io/docs/concepts/storage/volumes/#storage-capacity) for the Kubernetes scheduler. | No | All other external-provisioner features and the external-provisioner itself is considered GA and fully supported. @@ -61,7 +62,7 @@ Note that the external-provisioner does not scale with more replicas. Only one e * `--kube-api-burst `: Burst for clients that communicate with the kubernetes apiserver. Defaults to `10`. -* `--cloning-protection-threads `: Number of simultaniously running threads, handling cloning finalizer removal. Defaults to `1`. +* `--cloning-protection-threads `: Number of simultaneously running threads, handling cloning finalizer removal. Defaults to `1`. * `--metrics-address`: The TCP network address where the prometheus metrics endpoint will run (example: `:8080` which corresponds to port 8080 on local host). The default is empty string, which means metrics endpoint is disabled. @@ -69,6 +70,17 @@ Note that the external-provisioner does not scale with more replicas. 
Only one e * `--extra-create-metadata`: Enables the injection of extra PVC and PV metadata as parameters when calling `CreateVolume` on the driver (keys: "csi.storage.k8s.io/pvc/name", "csi.storage.k8s.io/pvc/namespace", "csi.storage.k8s.io/pv/name") +##### Storage capacity arguments + +See the [storage capacity section](#capacity-support) below for details. + +* `--capacity-threads `: Number of simultaneously running threads, handling CSIStorageCapacity objects. Defaults to `1`. + +* `--capacity-poll-interval `: How long the external-provisioner waits before checking for storage capacity changes. Defaults to `1m`. + +* `--enable-capacity `: Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. Currently supported: `--enable-capacity=central`. + + #### Other recognized arguments * `--feature-gates `: A set of comma separated `=` pairs that describe feature gates for alpha/experimental features. See [list of features](#feature-status) or `--help` output for list of recognized features. Example: `--feature-gates Topology=true` to enable Topology feature that's disabled by default. @@ -102,6 +114,88 @@ Yes | No | Yes | `Requisite` = Allowed topologies
`Preferred` = `Requisite` w No | Irrelevant | No | `Requisite` = Aggregated cluster topology
`Preferred` = `Requisite` with randomly selected node topology as first element No | Irrelevant | Yes | `Requisite` = Allowed topologies
`Preferred` = `Requisite` with randomly selected node topology as first element
+### Capacity support
+
+> :warning: *Warning:* This is an alpha feature and only supported by
+> Kubernetes >= 1.19 if the `CSIStorageCapacity` feature gate is
+> enabled.
+
+The external-provisioner can be used to create CSIStorageCapacity
+objects that hold information about the storage capacity available
+through the driver. The Kubernetes scheduler then [uses that
+information](https://kubernetes.io/docs/concepts/storage/storage-capacity)
+when selecting nodes for pods with unbound volumes that wait for the
+first consumer.
+
+To enable this feature in a driver deployment:
+- Set the `POD_NAME` and `POD_NAMESPACE` environment variables like this:
+```yaml
+   env:
+   - name: POD_NAMESPACE
+     valueFrom:
+       fieldRef:
+         fieldPath: metadata.namespace
+   - name: POD_NAME
+     valueFrom:
+       fieldRef:
+         fieldPath: metadata.name
+```
+- Add `--enable-capacity=central` to the command line flags.
+- Add `StorageCapacity: true` to the CSIDriver information object.
+  Without it, external-provisioner will publish information, but the
+  Kubernetes scheduler will ignore it. This can be used to first
+  deploy the driver without that flag and then, once sufficient
+  information has been published, enable the scheduler's use of it.
+- Optional: configure how often external-provisioner polls the driver
+  to detect changed capacity with `--capacity-poll-interval`.
+- Optional: configure how many worker threads are used in parallel
+  with `--capacity-threads`.
+
+To determine how many different topology segments exist,
+external-provisioner uses the topology keys and labels that the CSI
+driver instance on each node reports to kubelet in the
+`NodeGetInfoResponse.accessible_topology` field. The keys are stored
+by kubelet in the CSINode objects and the actual values in Node
+annotations.
+
+CSI drivers must report topology information that matches the storage
+pool(s) that they have access to, with granularity that matches the
+most restrictive pool.
+
+For example, if the driver runs in a node with region/rack topology
+and has access to per-region storage as well as per-rack storage, then
+the driver should report topology with region/rack as its keys. If it
+only has access to per-region storage, then it should just use region
+as key. If it uses region/rack, then redundant CSIStorageCapacity
+objects will be published, but the information is still correct. See
+the
+[KEP](https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1472-storage-capacity-tracking#with-central-controller)
+for details.
+
+For each segment and each storage class, CSI `GetCapacity` is called
+once with the topology of the segment and the parameters of the
+class. If there is no error and the capacity is non-zero, a
+CSIStorageCapacity object is created or updated (if it
+already exists from a prior call) with that information. Obsolete
+objects are removed.
+
+To ensure that CSIStorageCapacity objects get removed when the
+external-provisioner gets removed from the cluster, they all have an
+owner and therefore get garbage-collected when that owner
+disappears. The owner is not the external-provisioner pod itself but
+rather its parent. This way, it is possible to switch between
+external-provisioner instances without losing the already gathered
+information.
+
+CSIStorageCapacity objects are namespaced and get created in the
+namespace of the external-provisioner. 
Only CSIStorageCapacity objects +with the right owner are modified by external-provisioner and their +name is generated, so it is possible to deploy different drivers in +the same namespace. However, Kubernetes does not check who is creating +CSIStorageCapacity objects, so in theory a malfunctioning or malicious +driver deployment could also publish incorrect information about some +other driver. + ### CSI error and timeout handling The external-provisioner invokes all gRPC calls to CSI driver with timeout provided by `--timeout` command line argument (15 seconds by default). diff --git a/cmd/csi-provisioner/csi-provisioner.go b/cmd/csi-provisioner/csi-provisioner.go index fa86788f12..5a73164a7e 100644 --- a/cmd/csi-provisioner/csi-provisioner.go +++ b/cmd/csi-provisioner/csi-provisioner.go @@ -26,7 +26,9 @@ import ( "strings" "time" + "github.com/container-storage-interface/spec/lib/go/csi" flag "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" @@ -43,6 +45,8 @@ import ( "github.com/kubernetes-csi/csi-lib-utils/deprecatedflags" "github.com/kubernetes-csi/csi-lib-utils/leaderelection" "github.com/kubernetes-csi/csi-lib-utils/metrics" + "github.com/kubernetes-csi/external-provisioner/pkg/capacity" + "github.com/kubernetes-csi/external-provisioner/pkg/capacity/topology" ctrl "github.com/kubernetes-csi/external-provisioner/pkg/controller" snapclientset "github.com/kubernetes-csi/external-snapshotter/v2/pkg/client/clientset/versioned" ) @@ -58,7 +62,8 @@ var ( retryIntervalStart = flag.Duration("retry-interval-start", time.Second, "Initial retry interval of failed provisioning or deletion. It doubles with each failure, up to retry-interval-max.") retryIntervalMax = flag.Duration("retry-interval-max", 5*time.Minute, "Maximum retry interval of failed provisioning or deletion.") workerThreads = flag.Uint("worker-threads", 100, "Number of provisioner worker threads, in other words nr. of simultaneous CSI calls.") - finalizerThreads = flag.Uint("cloning-protection-threads", 1, "Number of simultaniously running threads, handling cloning finalizer removal") + finalizerThreads = flag.Uint("cloning-protection-threads", 1, "Number of simultaneously running threads, handling cloning finalizer removal") + capacityThreads = flag.Uint("capacity-threads", 1, "Number of simultaneously running threads, handling CSIStorageCapacity objects") operationTimeout = flag.Duration("timeout", 10*time.Second, "Timeout for waiting for creation or deletion of a volume") _ = deprecatedflags.Add("provisioner") @@ -76,6 +81,13 @@ var ( kubeAPIQPS = flag.Float32("kube-api-qps", 5, "QPS to use while communicating with the kubernetes apiserver. Defaults to 5.0.") kubeAPIBurst = flag.Int("kube-api-burst", 10, "Burst to use while communicating with the kubernetes apiserver. Defaults to 10.") + capacityFeatures = func() *capacity.Features { + capacity := &capacity.Features{} + flag.Var(capacity, "enable-capacity", "Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. 
Currently supported: --enable-capacity=central.") + return capacity + }() + capacityPollInterval = flag.Duration("capacity-poll-interval", time.Minute, "How long the external-provisioner waits before checking for storage capacity changes.") + featureGates map[string]bool provisionController *controller.ProvisionController version = "unknown" @@ -181,6 +193,7 @@ func main() { identity := strconv.FormatInt(timeStamp, 10) + "-" + strconv.Itoa(rand.Intn(10000)) + "-" + provisionerName factory := informers.NewSharedInformerFactory(clientset, ctrl.ResyncPeriodOfCsiNodeInformer) + var factoryForNamespace informers.SharedInformerFactory // usually nil, only used for CSIStorageCapacity // ------------------------------- // Listers @@ -266,8 +279,65 @@ func main() { controllerCapabilities, ) + var capacityController *capacity.Controller + if (*capacityFeatures)[capacity.FeatureCentral] { + podName := os.Getenv("POD_NAME") + namespace := os.Getenv("POD_NAMESPACE") + if podName == "" || namespace == "" { + klog.Fatalf("need POD_NAMESPACE/POD_NAME env variables, have only POD_NAMESPACE=%q and POD_NAME=%q", namespace, podName) + } + pod, err := clientset.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + klog.Fatalf("error getting our own pod: %v", err) + } + var controller *metav1.OwnerReference + for _, owner := range pod.OwnerReferences { + if owner.Controller != nil && *owner.Controller { + controller = &owner + break + } + } + if controller == nil { + klog.Fatal("pod does not have a controller which owns it") + } + + topologyInformer := topology.NewNodeTopology( + provisionerName, + clientset, + factory.Core().V1().Nodes(), + factory.Storage().V1().CSINodes(), + workqueue.NewNamedRateLimitingQueue(rateLimiter, "csitopology"), + ) + + // We only need objects from our own namespace. The normal factory would give + // us an informer for the entire cluster. + factoryForNamespace = informers.NewSharedInformerFactoryWithOptions(clientset, + ctrl.ResyncPeriodOfCsiNodeInformer, + informers.WithNamespace(namespace), + ) + + capacityController = capacity.NewCentralCapacityController( + csi.NewControllerClient(grpcClient), + provisionerName, + clientset, + // TODO: metrics for the queue?! + workqueue.NewNamedRateLimitingQueue(rateLimiter, "csistoragecapacity"), + *controller, + namespace, + topologyInformer, + factory.Storage().V1().StorageClasses(), + factoryForNamespace.Storage().V1alpha1().CSIStorageCapacities(), + *capacityPollInterval, + ) + } + run := func(ctx context.Context) { factory.Start(ctx.Done()) + if factoryForNamespace != nil { + // Starting is enough, the capacity controller will + // wait for sync. + factoryForNamespace.Start(ctx.Done()) + } cacheSyncResult := factory.WaitForCacheSync(ctx.Done()) for _, v := range cacheSyncResult { if !v { @@ -275,6 +345,9 @@ func main() { } } + if capacityController != nil { + go capacityController.Run(ctx, int(*capacityThreads)) + } if csiClaimController != nil { go csiClaimController.Run(ctx, int(*finalizerThreads)) } diff --git a/deploy/kubernetes/rbac.yaml b/deploy/kubernetes/rbac.yaml index 8d79962d26..6e8cdcfcdf 100644 --- a/deploy/kubernetes/rbac.yaml +++ b/deploy/kubernetes/rbac.yaml @@ -87,6 +87,14 @@ rules: - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] +# Permissions for CSIStorageCapacity are only needed enabling the publishing +# of storage capacity information. 
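+# The CSIStorageCapacity API is alpha and only available in Kubernetes >= 1.19
+# clusters with the CSIStorageCapacity feature gate enabled.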
+- apiGroups: ["storage.k8s.io"] + resources: ["csistoragecapacities"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] # Needed for CSIStorageCapacity owner determination. --- kind: RoleBinding diff --git a/go.sum b/go.sum index 981f2d8076..2f1fd5f7f9 100644 --- a/go.sum +++ b/go.sum @@ -555,6 +555,7 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200716221620-18dfb9cca345 h1:2gOG36vt1BhUqpzxwZLZJxUim2dHB05vw+RAn4Q6YOU= go.etcd.io/etcd v0.5.0-alpha.5.0.20200716221620-18dfb9cca345/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= diff --git a/pkg/capacity/capacity.go b/pkg/capacity/capacity.go new file mode 100644 index 0000000000..030686a5f1 --- /dev/null +++ b/pkg/capacity/capacity.go @@ -0,0 +1,574 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package capacity contains the code which controls the CSIStorageCapacity +// objects owned by the external-provisioner. +package capacity + +import ( + "context" + "fmt" + "reflect" + "sync" + "time" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/external-provisioner/pkg/capacity/topology" + "google.golang.org/grpc" + storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + storageinformersv1 "k8s.io/client-go/informers/storage/v1" + storageinformersv1alpha1 "k8s.io/client-go/informers/storage/v1alpha1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +const ( + ResyncPeriodOfCSIStorageCapacityInformer = 1 * time.Hour // same as ResyncPeriodOfCsiNodeInformer +) + +// Controller creates and updates CSIStorageCapacity objects. It +// deletes those which are no longer needed because their storage +// class or topology segment are gone. The controller only manages +// those CSIStorageCapacity objects that are owned by a certain +// entity. +// +// The controller maintains a set of topology segments (= NodeSelector +// pointers). Work items are a combination of such a pointer and a +// pointer to a storage class. These keys are mapped to the +// corresponding CSIStorageCapacity object, if one exists. 
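+// A nil entry means that no CSIStorageCapacity object has been created or
+// discovered for that combination yet.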
+// +// When processing a work item, the controller first checks whether +// the topology segment and storage class still exist. If not, +// the CSIStorageCapacity object gets deleted. Otherwise, it gets updated +// or created. +// +// New work items are queued for processing when the reconiliation loop +// finds differences, periodically (to refresh existing items) and when +// capacity is expected to have changed. +// +// The work queue is also used to delete duplicate CSIStorageCapacity objects, +// i.e. those that for some reason have the same topology segment +// and storage class name as some other object. That should never happen, +// but the controller is prepared to clean that up, just in case. +type Controller struct { + csiController CSICapacityClient + driverName string + client kubernetes.Interface + queue workqueue.RateLimitingInterface + owner metav1.OwnerReference + ownerNamespace string + topologyInformer topology.Informer + scInformer storageinformersv1.StorageClassInformer + cInformer storageinformersv1alpha1.CSIStorageCapacityInformer + pollPeriod time.Duration + + // capacities contains one entry for each object that is supposed + // to exist. + capacities map[workItem]*storagev1alpha1.CSIStorageCapacity + capacitiesLock sync.Mutex +} + +type workItem struct { + segment *topology.Segment + storageClassName string +} + +var ( + // Defines parameters for ExponentialBackoff used while starting up + // and listing CSIStorageCapacity objects. + listCSIStorageCapacityBackoff = wait.Backoff{ + Duration: time.Second * 5, + Factor: 1.1, + Steps: 10, + } +) + +// CSICapacityClient is the relevant subset of csi.ControllerClient. +type CSICapacityClient interface { + GetCapacity(ctx context.Context, in *csi.GetCapacityRequest, opts ...grpc.CallOption) (*csi.GetCapacityResponse, error) +} + +// NewController creates a new controller for CSIStorageCapacity objects. +func NewCentralCapacityController( + csiController CSICapacityClient, + driverName string, + client kubernetes.Interface, + queue workqueue.RateLimitingInterface, + owner metav1.OwnerReference, + ownerNamespace string, + topologyInformer topology.Informer, + scInformer storageinformersv1.StorageClassInformer, + cInformer storageinformersv1alpha1.CSIStorageCapacityInformer, + pollPeriod time.Duration, +) *Controller { + c := &Controller{ + csiController: csiController, + driverName: driverName, + client: client, + queue: queue, + owner: owner, + ownerNamespace: ownerNamespace, + topologyInformer: topologyInformer, + scInformer: scInformer, + cInformer: cInformer, + pollPeriod: pollPeriod, + capacities: map[workItem]*storagev1alpha1.CSIStorageCapacity{}, + } + + // Now register for changes. Depending on the implementation of the informers, + // this may already invoke callbacks. + handler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.onSCAddOrUpdate(obj.(*storagev1.StorageClass)) }, + UpdateFunc: func(_ interface{}, newObj interface{}) { c.onSCAddOrUpdate(newObj.(*storagev1.StorageClass)) }, + DeleteFunc: func(obj interface{}) { c.onSCDelete(obj.(*storagev1.StorageClass)) }, + } + c.scInformer.Informer().AddEventHandler(handler) + c.topologyInformer.AddCallback(c.onTopologyChanges) + + // We don't want the callbacks yet, but need to ensure that + // the informer controller is instantiated before the caller + // starts the factory. 
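+	// The event handler for this informer is added later in prepare(), once
+	// the caches have been populated.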
+ cInformer.Informer() + + return c +} + +// Run is a main Controller handler +func (c *Controller) Run(ctx context.Context, threadiness int) { + klog.Info("Starting Capacity Controller") + defer c.queue.ShutDown() + go c.scInformer.Informer().Run(ctx.Done()) + go c.topologyInformer.Run(ctx) + + c.prepare(ctx) + for i := 0; i < threadiness; i++ { + go wait.UntilWithContext(ctx, func(ctx context.Context) { + c.runWorker(ctx) + }, time.Second) + } + + go wait.UntilWithContext(ctx, func(ctx context.Context) { c.pollCapacities() }, c.pollPeriod) + + klog.Info("Started Capacity Controller") + <-ctx.Done() + klog.Info("Shutting down Capacity Controller") +} + +func (c *Controller) prepare(ctx context.Context) { + // Wait for topology and storage class informer sync. Once we have that, + // we know which CSIStorageCapacity objects we need. + if !cache.WaitForCacheSync(ctx.Done(), c.topologyInformer.HasSynced, c.scInformer.Informer().HasSynced, c.cInformer.Informer().HasSynced) { + return + } + + // The caches are fully populated now, but the event handlers + // may or may not have been invoked yet. To be sure that we + // have all data, we need to list all resources. Here we list + // topology segments, onTopologyChanges lists the classes. + c.onTopologyChanges(c.topologyInformer.List(), nil) + + if klog.V(3) { + scs, err := c.scInformer.Lister().List(labels.Everything()) + if err != nil { + // Shouldn't happen. + utilruntime.HandleError(err) + } + klog.V(3).Infof("Initial number of topology segments %d, storage classes %d, potential CSIStorageCapacity objects %d", + len(c.topologyInformer.List()), + len(scs), + len(c.capacities)) + } + + // Now that we know what we need, we can check what we have. + // We do that both via callbacks *and* by iterating over all + // objects: callbacks handle future updates and iterating + // avoids the assumumption that the callback will be invoked + // for all objects immediately when adding it. + klog.V(3).Info("Checking for existing CSIStorageCapacity objects") + handler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.onCAddOrUpdate(ctx, obj.(*storagev1alpha1.CSIStorageCapacity)) }, + UpdateFunc: func(_ interface{}, newObj interface{}) { + c.onCAddOrUpdate(ctx, newObj.(*storagev1alpha1.CSIStorageCapacity)) + }, + DeleteFunc: func(obj interface{}) { c.onCDelete(ctx, obj.(*storagev1alpha1.CSIStorageCapacity)) }, + } + c.cInformer.Informer().AddEventHandler(handler) + capacities, err := c.cInformer.Lister().List(labels.Everything()) + if err != nil { + // This shouldn't happen. + utilruntime.HandleError(err) + return + } + for _, capacity := range capacities { + c.onCAddOrUpdate(ctx, capacity) + } + + // Now that we have seen all existing objects, we are done + // with the preparation and can let our caller start + // processing work items. +} + +// onTopologyChanges is called by the topology informer. 
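+// It adds or removes work items for all storage classes of this driver,
+// depending on which topology segments were added or removed.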
+func (c *Controller) onTopologyChanges(added []*topology.Segment, removed []*topology.Segment) { + klog.V(3).Infof("Capacity Controller: topology changed: added %v, removed %v", added, removed) + + storageclasses, err := c.scInformer.Lister().List(labels.Everything()) + if err != nil { + utilruntime.HandleError(err) + return + } + + c.capacitiesLock.Lock() + defer c.capacitiesLock.Unlock() + + for _, sc := range storageclasses { + if sc.Provisioner != c.driverName { + continue + } + for _, segment := range added { + c.addWorkItem(segment, sc) + } + for _, segment := range removed { + c.removeWorkItem(segment, sc) + } + } +} + +// onSCAddOrUpdate is called for add or update events by the storage +// class listener. +func (c *Controller) onSCAddOrUpdate(sc *storagev1.StorageClass) { + if sc.Provisioner != c.driverName { + return + } + + klog.V(3).Infof("Capacity Controller: storage class %s was updated or added", sc.Name) + segments := c.topologyInformer.List() + + c.capacitiesLock.Lock() + defer c.capacitiesLock.Unlock() + for _, segment := range segments { + c.addWorkItem(segment, sc) + } +} + +// onSCDelete is called for delete events by the storage class listener. +func (c *Controller) onSCDelete(sc *storagev1.StorageClass) { + if sc.Provisioner != c.driverName { + return + } + + klog.V(3).Infof("Capacity Controller: storage class %s was removed", sc.Name) + segments := c.topologyInformer.List() + + c.capacitiesLock.Lock() + defer c.capacitiesLock.Unlock() + for _, segment := range segments { + c.removeWorkItem(segment, sc) + } +} + +// addWorkItem ensures that there is an item in c.capacities. It +// must be called while holding c.capacitiesLock! +func (c *Controller) addWorkItem(segment *topology.Segment, sc *storagev1.StorageClass) { + item := workItem{ + segment: segment, + storageClassName: sc.Name, + } + // Ensure that we have an entry for it... + capacity := c.capacities[item] + c.capacities[item] = capacity + // ... and then tell our workers to update + // or create that capacity object. + klog.V(5).Infof("Capacity Controller: enqueuing %+v", item) + c.queue.Add(item) +} + +// removeWorkItem ensures that the item gets removed from c.capacities. It +// must be called while holding c.capacitiesLock! +func (c *Controller) removeWorkItem(segment *topology.Segment, sc *storagev1.StorageClass) { + item := workItem{ + segment: segment, + storageClassName: sc.Name, + } + capacity, found := c.capacities[item] + if !found { + // Already gone or in the queue to be removed. + klog.V(5).Infof("Capacity Controller: %+v already removed", item) + return + } + // Deleting the item will prevent further updates to + // it, in case that it is already in the queue. + delete(c.capacities, item) + + if capacity == nil { + // No object to remove. + klog.V(5).Infof("Capacity Controller: %+v removed, no object", item) + return + } + + // Any capacity object in the queue will be deleted. + klog.V(5).Infof("Capacity Controller: enqueuing CSIStorageCapacity %s for removal", capacity.Name) + c.queue.Add(capacity) +} + +// pollCapacities must be called periodically to detect when the underlying storage capacity has changed. 
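+// It re-queues all currently known work items so that syncCapacity retrieves
+// fresh information for them via CSI GetCapacity.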
+func (c *Controller) pollCapacities() { + c.capacitiesLock.Lock() + defer c.capacitiesLock.Unlock() + + for item := range c.capacities { + klog.V(5).Infof("Capacity Controller: enqueuing %+v for periodic update", item) + c.queue.Add(item) + } +} + +func (c *Controller) runWorker(ctx context.Context) { + for c.processNextWorkItem(ctx) { + } +} + +// processNextWorkItem processes items from queue. +func (c *Controller) processNextWorkItem(ctx context.Context) bool { + obj, shutdown := c.queue.Get() + if shutdown { + return false + } + + err := func() error { + defer c.queue.Done(obj) + + switch obj := obj.(type) { + case workItem: + return c.syncCapacity(ctx, obj) + case *storagev1alpha1.CSIStorageCapacity: + return c.deleteCapacity(ctx, obj) + default: + klog.Warningf("unexpected work item %#v", obj) + } + + return nil + }() + + if err != nil { + utilruntime.HandleError(err) + klog.Warningf("Retrying %#v after %d failures", obj, c.queue.NumRequeues(obj)) + c.queue.AddRateLimited(obj) + } else { + c.queue.Forget(obj) + } + + return true +} + +// syncCapacity gets the capacity and then updates or creates the object. +func (c *Controller) syncCapacity(ctx context.Context, item workItem) error { + // We lock only while accessing c.capacities, but not during + // the potentially long-running operations. That is okay + // because there is only a single worker per item. In the + // unlikely case that the desired state of the item changes + // while we work on it, we will add or update an obsolete + // object which we then don't store and instead queue for + // removal. + c.capacitiesLock.Lock() + capacity, found := c.capacities[item] + c.capacitiesLock.Unlock() + + klog.V(5).Infof("Capacity Controller: refreshing %+v", item) + if !found { + // The item was removed in the meantime. This can happen when the storage class + // or the topology segment are gone. + klog.V(5).Infof("Capacity Controller: %v became obsolete", item) + return nil + } + + sc, err := c.scInformer.Lister().Get(item.storageClassName) + if err != nil { + if apierrs.IsNotFound(err) { + // Another indication that the value is no + // longer needed. + return nil + } + return fmt.Errorf("retrieve storage class for %+v: %v", item, err) + } + + req := &csi.GetCapacityRequest{ + Parameters: sc.Parameters, + // The assumption is that the capacity is independent of the + // capabilities. The standard makes it mandatory to pass something, + // therefore we pick something rather arbitrarily. + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{}, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_UNKNOWN, + }, + }, + }, + } + if item.segment != nil { + req.AccessibleTopology = &csi.Topology{ + Segments: item.segment.GetLabelMap(), + } + } + resp, err := c.csiController.GetCapacity(ctx, req) + if err != nil { + return fmt.Errorf("CSI GetCapacity for %+v: %v", item, err) + } + + quantity := resource.NewQuantity(resp.AvailableCapacity, resource.BinarySI) + if capacity == nil { + // Create new object. 
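+		// GenerateName avoids name collisions between objects; the owner
+		// reference ensures that the object gets garbage-collected when
+		// the owner disappears.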
+ capacity = &storagev1alpha1.CSIStorageCapacity{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "csisc-", + OwnerReferences: []metav1.OwnerReference{c.owner}, + }, + StorageClassName: item.storageClassName, + NodeTopology: item.segment.GetLabelSelector(), + Capacity: quantity, + } + var err error + klog.V(5).Infof("Capacity Controller: creating new object for %+v, new capacity %v", item, quantity) + capacity, err = c.client.StorageV1alpha1().CSIStorageCapacities(c.ownerNamespace).Create(ctx, capacity, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("create CSIStorageCapacity for %+v: %v", item, err) + } + klog.V(5).Infof("Capacity Controller: created %s for %+v with capacity %v", capacity.Name, item, quantity) + } else if capacity.Capacity.Value() == quantity.Value() { + klog.V(5).Infof("Capacity Controller: no need to update %s for %+v, same capacity %v", capacity.Name, item, quantity) + return nil + } else { + // Update existing object. Must not modify object in the informer cache. + capacity := capacity.DeepCopy() + capacity.Capacity = quantity + var err error + klog.V(5).Infof("Capacity Controller: updating %s for %+v, new capacity %v", capacity.Name, item, quantity) + capacity, err = c.client.StorageV1alpha1().CSIStorageCapacities(capacity.Namespace).Update(ctx, capacity, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("update CSIStorageCapacity for %+v: %v", item, err) + } + } + + c.capacitiesLock.Lock() + _, found = c.capacities[item] + if found { + // Remember the new or updated object for future updates. + c.capacities[item] = capacity + } else { + klog.V(5).Infof("Capacity Controller: %+v became obsolete during refresh, enqueue %s for deletion", item, capacity.Name) + c.queue.Add(capacity) + } + c.capacitiesLock.Unlock() + + return nil +} + +// deleteCapacity ensures that the object is gone when done. +func (c *Controller) deleteCapacity(ctx context.Context, capacity *storagev1alpha1.CSIStorageCapacity) error { + klog.V(5).Infof("Capacity Controller: removing CSIStorageCapacity %s", capacity.Name) + err := c.client.StorageV1alpha1().CSIStorageCapacities(capacity.Namespace).Delete(ctx, capacity.Name, metav1.DeleteOptions{}) + if err != nil && apierrs.IsNotFound(err) { + return nil + } + return err +} + +// syncCSIStorageObject takes a read-only CSIStorageCapacity object +// and either remembers the pointer to it for future updates or +// ensures that it gets deleted if no longer needed. Foreign objects +// are ignored. +func (c *Controller) onCAddOrUpdate(ctx context.Context, capacity *storagev1alpha1.CSIStorageCapacity) { + if !c.isControlledByUs(capacity.OwnerReferences) { + // Not ours (anymore?). For the unlikely case that someone removed our owner reference, + // we also must remove our reference to the object. + c.capacitiesLock.Lock() + defer c.capacitiesLock.Unlock() + for item, capacity2 := range c.capacities { + if capacity2 != nil && capacity2.UID == capacity.UID { + c.capacities[item] = nil + klog.V(5).Infof("Capacity Controller: CSIStorageCapacity %s owner was modified by someone, enqueueing %v for re-creation", capacity.Name, item) + c.queue.Add(item) + } + } + return + } + + c.capacitiesLock.Lock() + defer c.capacitiesLock.Unlock() + for item, capacity2 := range c.capacities { + if capacity2 != nil && capacity2.UID == capacity.UID { + // We already have matched the object. 
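+			// Matching by UID ensures that an object which was deleted and
+			// re-created under the same name is not mistaken for the old one.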
+ klog.V(5).Infof("Capacity Controller: CSIStorageCapacity %s is already known to match %+v", capacity.Name, item) + // If it has a different capacity than our old copy, then someone else must have + // modified the capacity and we need to check the capacity anew. + if capacity2.Capacity.Value() != capacity.Capacity.Value() { + klog.V(5).Infof("Capacity Controller: CSIStorageCapacity %s was modified by someone, enqueueing %v for fixing", capacity.Name, item) + c.queue.Add(item) + } + // Either way, remember the new object revision to avoid the "conflict" error + // when we try to update the old object. + c.capacities[item] = capacity + return + } + if capacity2 == nil && + item.storageClassName == capacity.StorageClassName && + reflect.DeepEqual(item.segment.GetLabelSelector(), capacity.NodeTopology) { + // This is the capacity object for this particular combination + // of parameters. Reuse it. + klog.V(5).Infof("Capacity Controller: CSIStorageCapacity %s matches %+v", capacity.Name, item) + c.capacities[item] = capacity + return + } + } + // The CSIStorageCapacity object is obsolete, delete it. + klog.V(5).Infof("Capacity Controller: CSIStorageCapacity %s is obsolete, enqueue for removal", capacity.Name) + c.queue.Add(capacity) +} + +func (c *Controller) onCDelete(ctx context.Context, capacity *storagev1alpha1.CSIStorageCapacity) { + c.capacitiesLock.Lock() + defer c.capacitiesLock.Unlock() + for item, capacity2 := range c.capacities { + if capacity2 != nil && capacity2.UID == capacity.UID { + // The object is still needed. Someone else must have removed it. + // Re-create it... + klog.V(5).Infof("Capacity Controller: CSIStorageCapacity %s was removed by someone, enqueue %v for re-creation", capacity.Name, item) + c.capacities[item] = nil + c.queue.Add(item) + return + } + } +} + +// isControlledByUs implements the same logic as https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1?tab=doc#IsControlledBy, +// just with the expected owner identified directly with the UID. +func (c *Controller) isControlledByUs(owners []metav1.OwnerReference) bool { + for _, owner := range owners { + if owner.Controller != nil && *owner.Controller && owner.UID == c.owner.UID { + return true + } + } + return false +} diff --git a/pkg/capacity/capacity_test.go b/pkg/capacity/capacity_test.go new file mode 100644 index 0000000000..aa33ab8ddd --- /dev/null +++ b/pkg/capacity/capacity_test.go @@ -0,0 +1,1121 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package capacity + +import ( + "context" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/external-provisioner/pkg/capacity/topology" + "google.golang.org/grpc" + storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + krand "k8s.io/apimachinery/pkg/util/rand" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/informers" + fakeclientset "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +func init() { + klog.InitFlags(nil) +} + +const ( + driverName = "test-driver" + ownerNamespace = "testns" + csiscRev = "CSISC-REV-" +) + +var ( + yes = true + defaultOwner = metav1.OwnerReference{ + APIVersion: "apps/v1", + Kind: "statefulset", + Name: "test-driver", + UID: "309cd460-2d62-4f40-bbcf-b7765aac5a6d", + Controller: &yes, + } + noOwner = metav1.OwnerReference{} + otherOwner = metav1.OwnerReference{ + APIVersion: "apps/v1", + Kind: "statefulset", + Name: "other-test-driver", + UID: "11111111-2d62-4f40-bbcf-b7765aac5a6d", + Controller: &yes, + } + + layer0 = topology.Segment{ + {Key: "layer0", Value: "foo"}, + } + layer0other = topology.Segment{ + {Key: "layer0", Value: "bar"}, + } + mb = resource.MustParse("1Mi") +) + +// TestCapacityController checks that the controller handles the initial state and +// several different changes at runtime correctly. +func TestController(t *testing.T) { + testcases := map[string]struct { + topology mockTopology + storage mockCapacity + initialSCs []testSC + initialCapacities []testCapacity + expectedCapacities []testCapacity + modify func(ctx context.Context, clientSet *fakeclientset.Clientset, expected []testCapacity) (modifiedExpected []testCapacity, err error) + capacityChange func(ctx context.Context, storage *mockCapacity, expected []testCapacity) (modifiedExpected []testCapacity) + }{ + "empty": { + expectedCapacities: []testCapacity{}, + }, + "one segment": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + expectedCapacities: []testCapacity{}, + }, + "one class": { + initialSCs: []testSC{ + { + name: "fast-sc", + driverName: driverName, + }, + }, + expectedCapacities: []testCapacity{}, + }, + "one capacity object": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + }, + }, + expectedCapacities: []testCapacity{ + { + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + }, + "reuse one capacity object, no changes": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. 
+ "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + }, + }, + initialCapacities: []testCapacity{ + { + uid: "test-capacity-1", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + expectedCapacities: []testCapacity{ + { + uid: "test-capacity-1", + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + }, + "reuse one capacity object, update capacity": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "2Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + }, + }, + initialCapacities: []testCapacity{ + { + uid: "test-capacity-1", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + expectedCapacities: []testCapacity{ + { + uid: "test-capacity-1", + resourceVersion: csiscRev + "1", + segment: layer0, + storageClassName: "other-sc", + quantity: "2Gi", + }, + }, + }, + "obsolete object, missing SC": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "1Gi", + }, + }, + initialCapacities: []testCapacity{ + { + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + expectedCapacities: []testCapacity{}, + }, + "obsolete object, missing segment": { + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + }, + }, + initialCapacities: []testCapacity{ + { + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + }, + "ignore capacity with other owner": { + initialCapacities: []testCapacity{ + { + owner: &otherOwner, + uid: "test-capacity-1", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + expectedCapacities: []testCapacity{ + { + owner: &otherOwner, + uid: "test-capacity-1", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + }, + "ignore capacity with no owner": { + initialCapacities: []testCapacity{ + { + owner: &noOwner, + uid: "test-capacity-1", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + expectedCapacities: []testCapacity{ + { + owner: &noOwner, + uid: "test-capacity-1", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + }, + "two segments, two classes, four objects missing": { + topology: mockTopology{ + segments: []*topology.Segment{ + &layer0, + &layer0other, + }, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. 
+ "foo": "1Gi", + "bar": "2Gi", + }, + }, + initialSCs: []testSC{ + { + name: "direct-sc", + driverName: driverName, + }, + { + name: "triple-sc", + driverName: driverName, + parameters: map[string]string{ + mockMultiplier: "3", + }, + }, + }, + expectedCapacities: []testCapacity{ + { + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "direct-sc", + quantity: "1Gi", + }, + { + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "triple-sc", + quantity: "3Gi", + }, + { + resourceVersion: csiscRev + "0", + segment: layer0other, + storageClassName: "direct-sc", + quantity: "2Gi", + }, + { + resourceVersion: csiscRev + "0", + segment: layer0other, + storageClassName: "triple-sc", + quantity: "6Gi", + }, + }, + }, + "two segments, two classes, four objects updated": { + topology: mockTopology{ + segments: []*topology.Segment{ + &layer0, + &layer0other, + }, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "1Gi", + "bar": "2Gi", + }, + }, + initialSCs: []testSC{ + { + name: "direct-sc", + driverName: driverName, + }, + { + name: "triple-sc", + driverName: driverName, + parameters: map[string]string{ + mockMultiplier: "3", + }, + }, + }, + initialCapacities: []testCapacity{ + { + uid: "test-capacity-1", + segment: layer0, + storageClassName: "direct-sc", + quantity: "1Mi", + }, + { + uid: "test-capacity-2", + segment: layer0, + storageClassName: "triple-sc", + quantity: "3Mi", + }, + { + uid: "test-capacity-3", + segment: layer0other, + storageClassName: "direct-sc", + quantity: "2Mi", + }, + { + uid: "test-capacity-4", + segment: layer0other, + storageClassName: "triple-sc", + quantity: "6Mi", + }, + }, + expectedCapacities: []testCapacity{ + { + uid: "test-capacity-1", + resourceVersion: csiscRev + "1", + segment: layer0, + storageClassName: "direct-sc", + quantity: "1Gi", + }, + { + uid: "test-capacity-2", + resourceVersion: csiscRev + "1", + segment: layer0, + storageClassName: "triple-sc", + quantity: "3Gi", + }, + { + uid: "test-capacity-3", + resourceVersion: csiscRev + "1", + segment: layer0other, + storageClassName: "direct-sc", + quantity: "2Gi", + }, + { + uid: "test-capacity-4", + resourceVersion: csiscRev + "1", + segment: layer0other, + storageClassName: "triple-sc", + quantity: "6Gi", + }, + }, + }, + "two segments, two classes, two added, two removed": { + topology: mockTopology{ + segments: []*topology.Segment{ + &layer0, + &layer0other, + }, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. 
+ "foo": "1Gi", + "bar": "2Gi", + }, + }, + initialSCs: []testSC{ + { + name: "direct-sc", + driverName: driverName, + }, + { + name: "triple-sc", + driverName: driverName, + parameters: map[string]string{ + mockMultiplier: "3", + }, + }, + }, + initialCapacities: []testCapacity{ + { + uid: "test-capacity-1", + segment: layer0, + storageClassName: "old-direct-sc", + quantity: "1Mi", + }, + { + uid: "test-capacity-2", + segment: layer0, + storageClassName: "old-triple-sc", + quantity: "3Mi", + }, + { + uid: "test-capacity-3", + segment: layer0other, + storageClassName: "direct-sc", + quantity: "2Mi", + }, + { + uid: "test-capacity-4", + segment: layer0other, + storageClassName: "triple-sc", + quantity: "6Mi", + }, + }, + expectedCapacities: []testCapacity{ + { + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "direct-sc", + quantity: "1Gi", + }, + { + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "triple-sc", + quantity: "3Gi", + }, + { + uid: "test-capacity-3", + resourceVersion: csiscRev + "1", + segment: layer0other, + storageClassName: "direct-sc", + quantity: "2Gi", + }, + { + uid: "test-capacity-4", + resourceVersion: csiscRev + "1", + segment: layer0other, + storageClassName: "triple-sc", + quantity: "6Gi", + }, + }, + }, + "fix modified capacity": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + }, + }, + expectedCapacities: []testCapacity{ + { + uid: "CSISC-UID-1", + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + modify: func(ctx context.Context, clientSet *fakeclientset.Clientset, expected []testCapacity) ([]testCapacity, error) { + capacities, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + capacity := capacities.Items[0] + capacity.Capacity = &mb + if _, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).Update(ctx, &capacity, metav1.UpdateOptions{}); err != nil { + return nil, err + } + expected[0].resourceVersion = csiscRev + "2" + return expected, nil + }, + }, + "re-create capacity": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. 
+ "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + }, + }, + expectedCapacities: []testCapacity{ + { + uid: "CSISC-UID-1", + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + modify: func(ctx context.Context, clientSet *fakeclientset.Clientset, expected []testCapacity) ([]testCapacity, error) { + capacities, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + capacity := capacities.Items[0] + if err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).Delete(ctx, capacity.Name, metav1.DeleteOptions{}); err != nil { + return nil, err + } + expected[0].uid = "CSISC-UID-2" + return expected, nil + }, + }, + "delete redundant capacity": { + modify: func(ctx context.Context, clientSet *fakeclientset.Clientset, expected []testCapacity) ([]testCapacity, error) { + capacity := makeCapacity(testCapacity{quantity: "1Gi"}) + if _, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).Create(ctx, capacity, metav1.CreateOptions{}); err != nil { + return nil, err + } + return expected, nil + }, + }, + "ignore capacity after owner change": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + }, + }, + expectedCapacities: []testCapacity{ + { + uid: "CSISC-UID-1", + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + modify: func(ctx context.Context, clientSet *fakeclientset.Clientset, expected []testCapacity) ([]testCapacity, error) { + capacities, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + capacity := capacities.Items[0] + // Unset owner. It's not clear why anyone would want to do that, but lets deal with it anyway: + // - the now "foreign" object must be left alone + // - an entry must be created anew + capacity.OwnerReferences = []metav1.OwnerReference{} + if _, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).Update(ctx, &capacity, metav1.UpdateOptions{}); err != nil { + return nil, err + } + expected[0].owner = &noOwner + expected[0].resourceVersion = csiscRev + "1" + expected = append(expected, testCapacity{ + uid: "CSISC-UID-2", + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }) + return expected, nil + }, + }, + "delete and recreate by someone": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. 
+ "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + }, + }, + expectedCapacities: []testCapacity{ + { + uid: "CSISC-UID-1", + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + modify: func(ctx context.Context, clientSet *fakeclientset.Clientset, expected []testCapacity) ([]testCapacity, error) { + capacities, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + capacity := capacities.Items[0] + // Delete and recreate with wrong capacity. This changes the UID while keeping the name + // the same. The capacity then must get corrected by the controller. + if err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).Delete(ctx, capacity.Name, metav1.DeleteOptions{}); err != nil { + return nil, err + } + capacity.UID = "CSISC-UID-2" + capacity.Capacity = &mb + if _, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).Create(ctx, &capacity, metav1.CreateOptions{}); err != nil { + return nil, err + } + expected[0].uid = capacity.UID + expected[0].resourceVersion = csiscRev + "1" + return expected, nil + }, + }, + "storage capacity change": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + }, + }, + expectedCapacities: []testCapacity{ + { + uid: "CSISC-UID-1", + resourceVersion: csiscRev + "0", + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + capacityChange: func(ctx context.Context, storage *mockCapacity, expected []testCapacity) []testCapacity { + storage.capacity["foo"] = "2Gi" + expected[0].quantity = "2Gi" + expected[0].resourceVersion = csiscRev + "1" + return expected + }, + }, + } + + for name, tc := range testcases { + // Not run in parallel. That doesn't work well in combination with global logging. + t.Run(name, func(t *testing.T) { + // There is no good way to shut down the controller. It spawns + // various goroutines and some of them (in particular shared informer) + // become very unhappy ("close on closed channel") when using a context + // that gets cancelled. Therefore we just keep everything running. + ctx := context.Background() + + var objects []runtime.Object + objects = append(objects, makeSCs(tc.initialSCs)...) + clientSet := fakeclientset.NewSimpleClientset(objects...) + clientSet.PrependReactor("create", "csistoragecapacities", createCSIStorageCapacityReactor()) + clientSet.PrependReactor("update", "csistoragecapacities", updateCSIStorageCapacityReactor()) + c := fakeController(ctx, clientSet, &tc.storage, &tc.topology) + for _, testCapacity := range tc.initialCapacities { + capacity := makeCapacity(testCapacity) + _, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).Create(ctx, capacity, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + } + c.prepare(ctx) + if err := process(ctx, c); err != nil { + t.Fatalf("unexpected processing error: %v", err) + } + err := validateCapacities(ctx, clientSet, tc.expectedCapacities) + if err != nil { + t.Fatalf("%v", err) + } + + // Now (optionally) modify the state and + // ensure that eventually the controller + // catches up. 
+ expectedCapacities := tc.expectedCapacities + if tc.modify != nil { + klog.Info("modifying objects") + ec, err := tc.modify(ctx, clientSet, expectedCapacities) + if err != nil { + t.Fatalf("modify objects: %v", err) + } + expectedCapacities = ec + if err := validateCapacitiesEventually(ctx, c, clientSet, expectedCapacities); err != nil { + t.Fatalf("modified objects: %v", err) + } + } + if tc.capacityChange != nil { + klog.Info("modifying capacity") + expectedCapacities = tc.capacityChange(ctx, &tc.storage, expectedCapacities) + c.pollCapacities() + if err := validateCapacitiesEventually(ctx, c, clientSet, expectedCapacities); err != nil { + t.Fatalf("modified capacity: %v", err) + } + } + }) + } +} + +func validateCapacities(ctx context.Context, clientSet *fakeclientset.Clientset, expectedCapacities []testCapacity) error { + actualCapacities, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("unexpected error: %v", err) + } + var messages []string + if len(actualCapacities.Items) != len(expectedCapacities) { + messages = append(messages, fmt.Sprintf("expected %d CSIStorageCapacity objects, got %d", len(expectedCapacities), len(actualCapacities.Items))) + } +nextActual: + for _, actual := range actualCapacities.Items { + for i, expected := range expectedCapacities { + expectedOwnerReferences := makeCapacity(expected).OwnerReferences + if reflect.DeepEqual(actual.NodeTopology, expected.segment.GetLabelSelector()) && + actual.StorageClassName == expected.storageClassName && + (len(actual.OwnerReferences) == 0 && len(expectedOwnerReferences) == 0 || + reflect.DeepEqual(actual.OwnerReferences, expectedOwnerReferences)) { + var mismatches []string + if expected.quantity != "" && actual.Capacity == nil { + mismatches = append(mismatches, "unexpected nil quantity") + } + if expected.quantity == "" && actual.Capacity != nil { + mismatches = append(mismatches, "unexpected quantity") + } + if expected.quantity != "" && actual.Capacity.Cmp(*expected.getCapacity()) != 0 { + mismatches = append(mismatches, fmt.Sprintf("expected quantity %v, got %v", expected.quantity, *actual.Capacity)) + } + if expected.uid != "" && actual.UID != expected.uid { + mismatches = append(mismatches, fmt.Sprintf("expected UID %s, got %s", expected.uid, actual.UID)) + } + if expected.resourceVersion != "" && actual.ResourceVersion != expected.resourceVersion { + mismatches = append(mismatches, fmt.Sprintf("expected ResourceVersion %s, got %s", expected.resourceVersion, actual.ResourceVersion)) + } + if len(mismatches) > 0 { + messages = append(messages, fmt.Sprintf("CSIStorageCapacity %+v:\n %s", actual, strings.Join(mismatches, "\n "))) + } + // Never match against the same expected capacity twice. Also, the ones that remain are dumped below. + expectedCapacities = append(expectedCapacities[:i], expectedCapacities[i+1:]...) 
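+				// Removing the entry is safe because we leave the inner loop
+				// right away.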
+ continue nextActual + } + } + messages = append(messages, fmt.Sprintf("unexpected CSIStorageCapacity %#v", actual)) + } + for _, expected := range expectedCapacities { + messages = append(messages, fmt.Sprintf("expected CSIStorageCapacity %+v not found", expected)) + } + if len(messages) > 0 { + return errors.New(strings.Join(messages, "\n")) + } + return nil +} + +func validateCapacitiesEventually(ctx context.Context, c *Controller, clientSet *fakeclientset.Clientset, expectedCapacities []testCapacity) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + deadline, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + var lastValidationError error + klog.Info("waiting for controller to catch up") + for { + select { + case <-ticker.C: + if err := process(ctx, c); err != nil { + return fmt.Errorf("unexpected processing error: %v", err) + } + lastValidationError = validateCapacities(ctx, clientSet, expectedCapacities) + if lastValidationError == nil { + return nil + } + case <-deadline.Done(): + return fmt.Errorf("timed out waiting for controller, last unexpected state:\n%v", lastValidationError) + } + } +} + +// createCSIStorageCapacityReactor implements the logic required for the GenerateName and UID fields to work when using +// the fake client. Add it with client.PrependReactor to your fake client. +func createCSIStorageCapacityReactor() func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + var uidCounter int + var mutex sync.Mutex + return func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + s := action.(ktesting.CreateAction).GetObject().(*storagev1alpha1.CSIStorageCapacity) + if s.Name == "" && s.GenerateName != "" { + s.Name = fmt.Sprintf("%s-%s", s.GenerateName, krand.String(16)) + } + if s.UID == "" { + mutex.Lock() + defer mutex.Unlock() + uidCounter++ + s.UID = types.UID(fmt.Sprintf("CSISC-UID-%d", uidCounter)) + } + s.ResourceVersion = csiscRev + "0" + return false, nil, nil + } +} + +// updateCSIStorageCapacityReactor implements the logic required for the ResourceVersion field to work when using +// the fake client. Add it with client.PrependReactor to your fake client. +func updateCSIStorageCapacityReactor() func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + return func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + s := action.(ktesting.UpdateAction).GetObject().(*storagev1alpha1.CSIStorageCapacity) + if !strings.HasPrefix(s.ResourceVersion, csiscRev) { + return false, nil, fmt.Errorf("resource version %q should have prefix %s", s.ResourceVersion, csiscRev) + } + revCounter, err := strconv.Atoi(s.ResourceVersion[len(csiscRev):]) + if err != nil { + return false, nil, fmt.Errorf("resource version %q should have formar %s: %v", s.ResourceVersion, csiscRev, err) + } + s.ResourceVersion = csiscRev + fmt.Sprintf("%d", revCounter+1) + return false, nil, nil + } +} + +func fakeController(ctx context.Context, client *fakeclientset.Clientset, storage CSICapacityClient, topologyInformer topology.Informer) *Controller { + utilruntime.ReallyCrash = false // avoids os.Exit after "close of closed channel" in shared informer code + + // We don't need resyncs, they just lead to confusing log output if they get triggered while already some + // new test is running. 
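+ // A one-hour resync period is effectively "never" for the duration of a test run.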
+ resyncPeriod := time.Hour + informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod) + scInformer := informerFactory.Storage().V1().StorageClasses() + cInformer := informerFactory.Storage().V1alpha1().CSIStorageCapacities() + rateLimiter := workqueue.NewItemExponentialFailureRateLimiter(time.Second, 2*time.Second) + queue := workqueue.NewNamedRateLimitingQueue(rateLimiter, "items") + + c := NewCentralCapacityController( + storage, + driverName, + client, + queue, + defaultOwner, + ownerNamespace, + topologyInformer, + scInformer, + cInformer, + 1000*time.Hour, // Not used, but even if it was, we wouldn't want automatic capacity polling while the test runs... + ) + + // This ensures that the informers are running and up-to-date. + go informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) + + return c +} + +// process handles work items until the queue is empty and the informers are synced. +func process(ctx context.Context, c *Controller) error { + for { + if c.queue.Len() == 0 { + done, err := storageClassesSynced(ctx, c) + if err != nil { + return fmt.Errorf("check storage classes: %v", err) + } + if done { + return nil + } + } + // There's no atomic "try to get a work item". Let's + // check one more time before potentially blocking + // in c.queue.Get(). + len := c.queue.Len() + if len > 0 { + klog.V(1).Infof("testing next work item, queue length %d", len) + c.processNextWorkItem(ctx) + klog.V(5).Infof("done testing next work item") + } + } +} + +func storageClassesSynced(ctx context.Context, c *Controller) (bool, error) { + actualStorageClasses, err := c.client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + informerStorageClasses, err := c.scInformer.Lister().List(labels.Everything()) + if len(informerStorageClasses) != len(actualStorageClasses.Items) { + return false, nil + } + if len(informerStorageClasses) > 0 && !func() bool { + for _, actualStorageClass := range actualStorageClasses.Items { + for _, informerStorageClass := range informerStorageClasses { + if reflect.DeepEqual(actualStorageClass, *informerStorageClass) { + return true + } + } + } + return false + }() { + return false, nil + } + + return true, nil +} + +const ( + mockMultiplier = "multiplier" +) + +// mockGetCapacity simulates a driver with a layered storage system: +// storage exists at each level with different quantities (one pool for all nodes, +// one pool for each data center, one pool for reach region). +// +// It uses "layer1", "layer2", ... etc. as topology keys to dive into +// the map, which then either has a string or another map. +// A fake "multiplier" parameter is applied to the resulting capacity. 
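+// For example (illustrative values, not taken from the tests): with
+//
+//   capacity := map[string]interface{}{
+//     "us-east": map[string]interface{}{
+//       "dc1": "10Gi",
+//       "dc2": "20Gi",
+//     },
+//   }
+//
+// a request whose accessible topology contains the segments
+// {"layer0": "us-east", "layer1": "dc1"} resolves to "10Gi".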
+type mockCapacity struct { + capacity map[string]interface{} +} + +func (mc *mockCapacity) GetCapacity(ctx context.Context, in *csi.GetCapacityRequest, opts ...grpc.CallOption) (*csi.GetCapacityResponse, error) { + available := "" + if in.AccessibleTopology != nil { + var err error + available, err = getCapacity(mc.capacity, in.AccessibleTopology.Segments, 0) + if err != nil { + return nil, err + } + } + resp := &csi.GetCapacityResponse{} + if available != "" { + quantity := resource.MustParse(available) + resp.AvailableCapacity = quantity.Value() + } + multiplierStr, ok := in.Parameters[mockMultiplier] + if ok { + multiplier, err := strconv.Atoi(multiplierStr) + if err != nil { + return nil, fmt.Errorf("invalid parameter %s -> %s: %v", mockMultiplier, multiplierStr, err) + } + resp.AvailableCapacity *= int64(multiplier) + } + return resp, nil +} + +func getCapacity(capacity map[string]interface{}, segments map[string]string, layer int) (string, error) { + if capacity == nil { + return "", fmt.Errorf("no information found at layer %d", layer) + } + key := fmt.Sprintf("layer%d", layer) + value := capacity[segments[key]] + switch value := value.(type) { + case string: + return value, nil + case map[string]interface{}: + result, err := getCapacity(value, segments, layer+1) + if err != nil { + return "", fmt.Errorf("%s -> %s: %v", key, segments[key], err) + } + return result, nil + } + return "", nil +} + +// mockTopology simulates a driver installation on different nodes. +type mockTopology struct { + segments []*topology.Segment + callbacks []topology.Callback +} + +func (mt *mockTopology) AddCallback(cb topology.Callback) { + mt.callbacks = append(mt.callbacks, cb) + cb(mt.segments, nil) +} + +func (mt *mockTopology) List() []*topology.Segment { + return mt.segments +} + +func (mt *mockTopology) Run(ctx context.Context) { +} + +func (mt *mockTopology) HasSynced() bool { + return true +} + +type testCapacity struct { + uid types.UID + resourceVersion string + segment topology.Segment + storageClassName string + quantity string + owner *metav1.OwnerReference +} + +func (tc testCapacity) getCapacity() *resource.Quantity { + if tc.quantity == "" { + return nil + } + quantity := resource.MustParse(tc.quantity) + return &quantity +} + +var capacityCounter int + +func makeCapacity(in testCapacity) *storagev1alpha1.CSIStorageCapacity { + capacityCounter++ + var owners []metav1.OwnerReference + switch in.owner { + case nil: + owners = append(owners, defaultOwner) + case &noOwner: + // Don't add anything. 
+ default: + owners = append(owners, *in.owner) + } + return &storagev1alpha1.CSIStorageCapacity{ + ObjectMeta: metav1.ObjectMeta{ + UID: in.uid, + ResourceVersion: in.resourceVersion, + Name: fmt.Sprintf("csisc-%d", capacityCounter), + OwnerReferences: owners, + }, + NodeTopology: in.segment.GetLabelSelector(), + StorageClassName: in.storageClassName, + Capacity: in.getCapacity(), + } +} + +type testSC struct { + name string + driverName string + parameters map[string]string +} + +func makeSC(in testSC) *storagev1.StorageClass { + return &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: in.name, + }, + Provisioner: in.driverName, + Parameters: in.parameters, + } +} + +func makeSCs(in []testSC) (items []runtime.Object) { + for _, item := range in { + items = append(items, makeSC(item)) + } + return +} diff --git a/pkg/capacity/doc.go b/pkg/capacity/doc.go new file mode 100644 index 0000000000..a0f064c2ba --- /dev/null +++ b/pkg/capacity/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package capacity contains the code which controls the CSIStorageCapacity +// objects owned by the external-provisioner. +package capacity diff --git a/pkg/capacity/features.go b/pkg/capacity/features.go new file mode 100644 index 0000000000..4835ad6d9f --- /dev/null +++ b/pkg/capacity/features.go @@ -0,0 +1,77 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package capacity + +import ( + "fmt" + "strings" + + flag "github.com/spf13/pflag" +) + +// Feature is the type for named features supported by the capacity +// controller. +type Feature string + +// Features are disabled by default. +type Features map[Feature]bool + +const ( + // FeatureCentral enables the mode where there is only one + // external-provisioner actively running in the cluster which + // talks to the CSI driver's controller. + FeatureCentral = Feature("central") + + // FeatureLocal enables the mode where external-provisioner + // is deployed on each node. Not implemented yet. + FeatureLocal = Feature("local") +) + +// Set enables the named features. Multiple features can be listed, separated by commas, +// with optional whitespace. 
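+// For example (illustrative):
+//
+//   var features Features
+//   _ = features.Set("central")            // Features{"central": true}
+//   err := features.Set("no-such-feature") // error: "no-such-feature: unknown feature"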
+func (features *Features) Set(value string) error { + for _, part := range strings.Split(value, ",") { + part := Feature(strings.TrimSpace(part)) + switch part { + case FeatureCentral: + if *features == nil { + *features = Features{} + } + (*features)[part] = true + case FeatureLocal: + return fmt.Errorf("%s: not implemented yet", part) + case "": + default: + return fmt.Errorf("%s: unknown feature", part) + } + } + return nil +} + +func (features *Features) String() string { + var parts []string + for feature := range *features { + parts = append(parts, string(feature)) + } + return strings.Join(parts, ",") +} + +func (features *Features) Type() string { + return "enumeration" +} + +var _ flag.Value = &Features{} diff --git a/pkg/capacity/features_test.go b/pkg/capacity/features_test.go new file mode 100644 index 0000000000..fcb6e2af1c --- /dev/null +++ b/pkg/capacity/features_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package capacity + +import ( + "reflect" + "testing" +) + +func TestFeatures(t *testing.T) { + tests := []struct { + name string + input []string + expectedOutput Features + expectedError string + }{ + { + name: "empty", + }, + { + name: "central", + input: []string{string(FeatureCentral)}, + expectedOutput: Features{FeatureCentral: true}, + }, + { + name: "local", + input: []string{string(FeatureLocal)}, + expectedError: string(FeatureLocal) + ": not implemented yet", + }, + { + name: "invalid", + input: []string{"no-such-feature"}, + expectedError: "no-such-feature: unknown feature", + }, + { + name: "multi", + input: []string{string(FeatureCentral), string(FeatureCentral)}, + expectedOutput: Features{FeatureCentral: true}, + }, + { + name: "comma", + input: []string{string(FeatureCentral) + " ," + string(FeatureCentral) + " "}, + expectedOutput: Features{FeatureCentral: true}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var actual Features + for _, value := range test.input { + err := actual.Set(value) + if err != nil && test.expectedError != "" { + if err.Error() == test.expectedError { + return + } + t.Fatalf("expected error %q, got %v", test.expectedError, err) + } + if err == nil && test.expectedError != "" { + t.Fatalf("expected error %q, got no error", test.expectedError) + } + } + if !reflect.DeepEqual(actual, test.expectedOutput) { + t.Fatalf("expected %v, got %v", test.expectedOutput, actual) + } + }) + } +} diff --git a/pkg/capacity/topology/doc.go b/pkg/capacity/topology/doc.go new file mode 100644 index 0000000000..6adfde6a43 --- /dev/null +++ b/pkg/capacity/topology/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package topology contains an abstract interface for discovering +// topology segments for a storage backend and a specific implementation +// which does that based on the CSINodeDriver.TopologyKeys and the +// corresponding labels for the nodes. +package topology diff --git a/pkg/capacity/topology/nodes.go b/pkg/capacity/topology/nodes.go new file mode 100644 index 0000000000..adeb9f7f52 --- /dev/null +++ b/pkg/capacity/topology/nodes.go @@ -0,0 +1,288 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topology + +import ( + "context" + "reflect" + "sort" + "sync" + + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + coreinformersv1 "k8s.io/client-go/informers/core/v1" + storageinformersv1 "k8s.io/client-go/informers/storage/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +// NewNodeTopology returns an informer that synthesizes storage +// topology segments based on the accessible topology that each CSI +// driver node instance reports. See +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1472-storage-capacity-tracking#with-central-controller +// for details. +func NewNodeTopology( + driverName string, + client kubernetes.Interface, + nodeInformer coreinformersv1.NodeInformer, + csiNodeInformer storageinformersv1.CSINodeInformer, + queue workqueue.RateLimitingInterface, +) Informer { + nt := &nodeTopology{ + driverName: driverName, + client: client, + nodeInformer: nodeInformer, + csiNodeInformer: csiNodeInformer, + queue: queue, + } + + // Whenever Node or CSINode objects change, we need to + // recalculate the new topology segments. We could do that + // immediately, but it is better to let the input data settle + // a bit and just remember that there is work to be done. + nodeHandler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + klog.V(5).Infof("capacity topology: new node: %s", obj.(*v1.Node).Name) + queue.Add("") + }, + UpdateFunc: func(oldObj interface{}, newObj interface{}) { + if reflect.DeepEqual(oldObj.(*v1.Node).Labels, newObj.(*v1.Node).Labels) { + // Shortcut: labels haven't changed, no need to sync. 
+ return + } + klog.V(5).Infof("capacity topology: updated node: %s", newObj.(*v1.Node).Name) + queue.Add("") + }, + DeleteFunc: func(obj interface{}) { + klog.V(5).Infof("capacity topology: removed node: %s", obj.(*v1.Node).Name) + queue.Add("") + }, + } + nodeInformer.Informer().AddEventHandler(nodeHandler) + csiNodeHandler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + klog.V(5).Infof("capacity topology: new CSINode: %s", obj.(*storagev1.CSINode).Name) + queue.Add("") + }, + UpdateFunc: func(oldObj interface{}, newObj interface{}) { + oldKeys := nt.driverTopologyKeys(oldObj.(*storagev1.CSINode)) + newKeys := nt.driverTopologyKeys(newObj.(*storagev1.CSINode)) + if reflect.DeepEqual(oldKeys, newKeys) { + // Shortcut: keys haven't changed, no need to sync. + return + } + klog.V(5).Infof("capacity topology: updated CSINode: %s", newObj.(*storagev1.CSINode).Name) + queue.Add("") + }, + DeleteFunc: func(obj interface{}) { + klog.V(5).Infof("capacity topology: removed CSINode: %s", obj.(*storagev1.CSINode).Name) + queue.Add("") + }, + } + csiNodeInformer.Informer().AddEventHandler(csiNodeHandler) + + return nt +} + +var _ Informer = &nodeTopology{} + +type nodeTopology struct { + driverName string + client kubernetes.Interface + nodeInformer coreinformersv1.NodeInformer + csiNodeInformer storageinformersv1.CSINodeInformer + queue workqueue.RateLimitingInterface + + mutex sync.Mutex + // segments hold a list of all currently known topology segments. + segments []*Segment + // callbacks contains all callbacks that need to be invoked + // after making changes to the list of known segments. + callbacks []Callback +} + +// driverTopologyKeys returns nil if the driver is not running +// on the node, otherwise at least an empty slice of topology keys. +func (nt *nodeTopology) driverTopologyKeys(csiNode *storagev1.CSINode) []string { + for _, csiNodeDriver := range csiNode.Spec.Drivers { + if csiNodeDriver.Name == nt.driverName { + if csiNodeDriver.TopologyKeys == nil { + return []string{} + } + return csiNodeDriver.TopologyKeys + } + } + return nil +} + +func (nt *nodeTopology) AddCallback(cb Callback) { + nt.mutex.Lock() + defer nt.mutex.Unlock() + + nt.callbacks = append(nt.callbacks, cb) +} + +func (nt *nodeTopology) List() []*Segment { + nt.mutex.Lock() + defer nt.mutex.Unlock() + + // We need to return a new slice to protect against future + // changes in nt.segments. The segments themselves are + // immutable and shared. + segments := make([]*Segment, len(nt.segments)) + copy(segments, nt.segments) + return segments +} + +func (nt *nodeTopology) Run(ctx context.Context) { + go nt.nodeInformer.Informer().Run(ctx.Done()) + go nt.csiNodeInformer.Informer().Run(ctx.Done()) + go nt.runWorker(ctx) + + klog.Info("Started node topology informer") + <-ctx.Done() + klog.Info("Shutting node topology informer") +} + +func (nt *nodeTopology) HasSynced() bool { + if nt.nodeInformer.Informer().HasSynced() && + nt.csiNodeInformer.Informer().HasSynced() { + // Now that both informers are up-to-date, use that + // information to update our own view of the world. 
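+ // Syncing here, instead of waiting for the next work item, ensures that
+ // List() already returns the initial segments once HasSynced reports true.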
+ nt.sync(context.Background()) + return true + } + return false +} + +func (nt *nodeTopology) runWorker(ctx context.Context) { + for nt.processNextWorkItem(ctx) { + } +} + +func (nt *nodeTopology) processNextWorkItem(ctx context.Context) bool { + obj, shutdown := nt.queue.Get() + if shutdown { + return false + } + defer nt.queue.Done(obj) + nt.sync(ctx) + return true +} + +func (nt *nodeTopology) sync(ctx context.Context) { + // For all nodes on which the driver is registered, collect the topology key/value pairs + // and sort them by key name to make the result deterministic. Skip all segments that have + // been seen before. + segments := nt.List() + removalCandidates := map[*Segment]bool{} + var addedSegments, removedSegments []*Segment + for _, segment := range segments { + // Assume that the segment is removed. Will be set to + // false if we find out otherwise. + removalCandidates[segment] = true + } + + csiNodes, err := nt.csiNodeInformer.Lister().List(labels.Everything()) + if err != nil { + utilruntime.HandleError(err) + return + } + existingSegments := make([]*Segment, 0, len(segments)) +node: + for _, csiNode := range csiNodes { + topologyKeys := nt.driverTopologyKeys(csiNode) + if topologyKeys == nil { + // Driver not running on node, ignore it. + continue + } + node, err := nt.nodeInformer.Lister().Get(csiNode.Name) + if err != nil { + if apierrs.IsNotFound(err) { + // Obsolete CSINode object? Ignore it. + continue + } + // This shouldn't happen. If it does, + // something is very wrong and we give up. + utilruntime.HandleError(err) + return + } + + newSegment := Segment{} + sort.Strings(topologyKeys) + for _, key := range topologyKeys { + value, ok := node.Labels[key] + if !ok { + // The driver announced some topology key and kubelet recorded + // it in CSINode, but we haven't seen the corresponding + // node update yet as the label is not set. Ignore the node + // for now, we'll sync up when we get the node update. + continue node + } + newSegment = append(newSegment, SegmentEntry{key, value}) + } + + // Add it only if new, otherwise look at the next node. + for _, segment := range segments { + if newSegment.Compare(*segment) == 0 { + // Reuse a segment instead of using the new one. This keeps pointers stable. + removalCandidates[segment] = false + existingSegments = append(existingSegments, segment) + continue node + } + } + for _, segment := range addedSegments { + if newSegment.Compare(*segment) == 0 { + // We already discovered this new segment. + continue node + } + } + + // A completely new segment. + addedSegments = append(addedSegments, &newSegment) + existingSegments = append(existingSegments, &newSegment) + } + + // Lock while making changes, but unlock before actually invoking callbacks. + nt.mutex.Lock() + nt.segments = existingSegments + + // Theoretically callbacks could change while we don't have + // the lock, so make a copy. 
+ callbacks := make([]Callback, len(nt.callbacks)) + copy(callbacks, nt.callbacks) + nt.mutex.Unlock() + + for segment, wasRemoved := range removalCandidates { + if wasRemoved { + removedSegments = append(removedSegments, segment) + } + } + if len(addedSegments) > 0 || len(removedSegments) > 0 { + klog.V(5).Infof("topology changed: added %v, removed %v", addedSegments, removedSegments) + for _, cb := range callbacks { + cb(addedSegments, removedSegments) + } + } else { + klog.V(5).Infof("topology unchanged") + } +} diff --git a/pkg/capacity/topology/nodes_test.go b/pkg/capacity/topology/nodes_test.go new file mode 100644 index 0000000000..009aa32997 --- /dev/null +++ b/pkg/capacity/topology/nodes_test.go @@ -0,0 +1,633 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topology + +import ( + "context" + "fmt" + "reflect" + "sort" + "testing" + "time" + + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + fakeclientset "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +func init() { + klog.InitFlags(nil) +} + +const ( + driverName = "my-csi-driver" + node1 = "node1" + node2 = "node2" +) + +var ( + localStorageKey = "nodename" + localStorageKeys = []string{localStorageKey} + localStorageLabelsNode1 = map[string]string{localStorageKey: node1} + localStorageNode1 = &Segment{ + {localStorageKey, node1}, + } + localStorageLabelsNode2 = map[string]string{localStorageKey: node2} + localStorageNode2 = &Segment{ + {localStorageKey, node2}, + } + networkStorageKeys = []string{"A", "B", "C"} + networkStorageLabels = map[string]string{ + networkStorageKeys[0]: "US", + networkStorageKeys[1]: "NY", + networkStorageKeys[2]: "1", + } + networkStorage = &Segment{ + {networkStorageKeys[0], "US"}, + {networkStorageKeys[1], "NY"}, + {networkStorageKeys[2], "1"}, + } + networkStorageLabels2 = map[string]string{ + networkStorageKeys[0]: "US", + networkStorageKeys[1]: "NY", + networkStorageKeys[2]: "2", + } + networkStorage2 = &Segment{ + {networkStorageKeys[0], "US"}, + {networkStorageKeys[1], "NY"}, + {networkStorageKeys[2], "2"}, + } +) + +func removeNode(t *testing.T, client *fakeclientset.Clientset, nodeName string) { + err := client.CoreV1().Nodes().Delete(context.Background(), nodeName, metav1.DeleteOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func removeCSINode(t *testing.T, client *fakeclientset.Clientset, nodeName string) { + err := client.StorageV1().CSINodes().Delete(context.Background(), nodeName, metav1.DeleteOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +// TestNodeTopology checks that node labels are correctly transformed +// into topology segments. 
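+// For update cases, the difference between expectedSegments and
+// expectedUpdatedSegments determines which segments the callback is
+// expected to report as added and removed.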
+func TestNodeTopology(t *testing.T) { + testcases := map[string]struct { + driverName string + initialNodes []testNode + expectedSegments []*Segment + update func(t *testing.T, client *fakeclientset.Clientset) + expectedUpdatedSegments []*Segment + }{ + "empty": {}, + "one-node": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + }, + expectedSegments: []*Segment{localStorageNode1}, + }, + "missing-csi-node": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + skipCSINodeCreation: true, + }, + }, + }, + "missing-node": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + skipNodeCreation: true, + }, + }, + }, + "missing-node-labels": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + }, + }, + }, + "two-nodes": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + { + name: node2, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode2, + }, + }, + expectedSegments: []*Segment{localStorageNode1, localStorageNode2}, + }, + "shared-storage": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + { + name: node2, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + }, + expectedSegments: []*Segment{localStorageNode1}, + }, + "other-shared-storage": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode2, + }, + { + name: node2, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode2, + }, + }, + expectedSegments: []*Segment{localStorageNode2}, + }, + "deep-topology": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: networkStorageKeys, + }, + labels: networkStorageLabels, + }, + { + name: node2, + driverKeys: map[string][]string{ + driverName: networkStorageKeys, + }, + labels: networkStorageLabels, + }, + }, + expectedSegments: []*Segment{networkStorage}, + }, + "mixed-topology": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + { + name: node2, + driverKeys: map[string][]string{ + driverName: networkStorageKeys, + }, + labels: networkStorageLabels, + }, + }, + expectedSegments: []*Segment{localStorageNode1, networkStorage}, + }, + "partial-match": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: networkStorageKeys, + }, + labels: networkStorageLabels, + }, + { + name: node2, + driverKeys: map[string][]string{ + driverName: networkStorageKeys, + }, + labels: networkStorageLabels2, + }, + }, + expectedSegments: []*Segment{networkStorage, networkStorage2}, + }, + "unsorted-keys": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + // This node reports keys in reverse order, which must not make a 
difference. + driverName: []string{networkStorageKeys[2], networkStorageKeys[1], networkStorageKeys[0]}, + }, + labels: networkStorageLabels, + }, + { + name: node2, + driverKeys: map[string][]string{ + driverName: networkStorageKeys, + }, + labels: networkStorageLabels, + }, + }, + expectedSegments: []*Segment{networkStorage}, + }, + "wrong-driver": { + driverName: "other-driver", + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + }, + }, + "remove-csi-node": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + }, + expectedSegments: []*Segment{localStorageNode1}, + update: func(t *testing.T, client *fakeclientset.Clientset) { + removeCSINode(t, client, node1) + }, + }, + "remove-node": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + }, + expectedSegments: []*Segment{localStorageNode1}, + update: func(t *testing.T, client *fakeclientset.Clientset) { + removeNode(t, client, node1) + }, + }, + "remove-driver": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + }, + expectedSegments: []*Segment{localStorageNode1}, + update: func(t *testing.T, client *fakeclientset.Clientset) { + csiNode, err := client.StorageV1().CSINodes().Get(context.Background(), node1, metav1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + csiNode.Spec.Drivers = nil + if _, err := client.StorageV1().CSINodes().Update(context.Background(), csiNode, metav1.UpdateOptions{}); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }, + }, + "change-labels": { + initialNodes: []testNode{ + { + name: node1, + driverKeys: map[string][]string{ + driverName: localStorageKeys, + }, + labels: localStorageLabelsNode1, + }, + }, + expectedSegments: []*Segment{localStorageNode1}, + update: func(t *testing.T, client *fakeclientset.Clientset) { + node, err := client.CoreV1().Nodes().Get(context.Background(), node1, metav1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // This isn't a realistic test case because CSI drivers cannot change their topology? + // We support it anyway. + node.Labels[localStorageKey] = node2 + if _, err := client.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{}); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }, + expectedUpdatedSegments: []*Segment{localStorageNode2}, + }, + } + + for name, tc := range testcases { + // Not run in parallel. That doesn't work well in combination with global logging. + t.Run(name, func(t *testing.T) { + // There is no good way to shut down the informers. They spawn + // various goroutines and some of them (in particular shared informer) + // become very unhappy ("close on closed channel") when using a context + // that gets cancelled. Therefore we just keep everything running. + // + // The informers also catch up with changes made via the client API + // asynchronously. To ensure expected input for sync(), we wait until + // the content of the informers is identical to what is currently stored. 
+ ctx := context.Background() + + testDriverName := tc.driverName + if testDriverName == "" { + testDriverName = driverName + } + + var objects []runtime.Object + objects = append(objects, makeNodes(tc.initialNodes)...) + clientSet := fakeclientset.NewSimpleClientset(objects...) + nt := fakeNodeTopology(ctx, testDriverName, clientSet) + if err := waitForInformers(ctx, nt); err != nil { + t.Fatalf("unexpected error: %v", err) + } + validate(t, nt, tc.expectedSegments, nil, tc.expectedSegments) + + if tc.update != nil { + tc.update(t, clientSet) + if err := waitForInformers(ctx, nt); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Determine the expected changes based on the delta. + var expectedAdded, expectedRemoved []*Segment + for _, segment := range tc.expectedUpdatedSegments { + if !containsSegment(tc.expectedSegments, segment) { + expectedAdded = append(expectedAdded, segment) + } + } + for _, segment := range tc.expectedSegments { + if !containsSegment(tc.expectedUpdatedSegments, segment) { + expectedRemoved = append(expectedRemoved, segment) + } + } + validate(t, nt, expectedAdded, expectedRemoved, tc.expectedUpdatedSegments) + } + }) + } +} + +type segmentsFound map[*Segment]bool + +func (sf segmentsFound) Found() []*Segment { + var found []*Segment + for key, value := range sf { + if value { + found = append(found, key) + } + } + return found +} + +func addTestCallback(nt *nodeTopology) (added, removed segmentsFound, called *bool) { + added = segmentsFound{} + removed = segmentsFound{} + called = new(bool) + nt.AddCallback(func(a, r []*Segment) { + *called = true + for _, segment := range a { + added[segment] = true + } + for _, segment := range r { + removed[segment] = true + } + }) + return +} + +func containsSegment(segments []*Segment, segment *Segment) bool { + for _, s := range segments { + if s.Compare(*segment) == 0 { + return true + } + } + return false +} + +func fakeNodeTopology(ctx context.Context, testDriverName string, client *fakeclientset.Clientset) *nodeTopology { + // We don't need resyncs, they just lead to confusing log output if they get triggered while already some + // new test is running. 
+ informerFactory := informers.NewSharedInformerFactory(client, 0*time.Second /* no resync */) + nodeInformer := informerFactory.Core().V1().Nodes() + csiNodeInformer := informerFactory.Storage().V1().CSINodes() + rateLimiter := workqueue.NewItemExponentialFailureRateLimiter(time.Second, 2*time.Second) + queue := workqueue.NewNamedRateLimitingQueue(rateLimiter, "items") + + nt := NewNodeTopology( + testDriverName, + client, + nodeInformer, + csiNodeInformer, + queue, + ).(*nodeTopology) + + go informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) + + return nt +} + +func waitForInformers(ctx context.Context, nt *nodeTopology) error { + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + err := wait.PollImmediateUntil(time.Millisecond, func() (bool, error) { + actualNodes, err := nt.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + informerNodes, err := nt.nodeInformer.Lister().List(labels.Everything()) + if len(informerNodes) != len(actualNodes.Items) { + return false, nil + } + if len(informerNodes) > 0 && !func() bool { + for _, actualNode := range actualNodes.Items { + for _, informerNode := range informerNodes { + if reflect.DeepEqual(actualNode, *informerNode) { + return true + } + } + } + return false + }() { + return false, nil + } + + actualCSINodes, err := nt.client.StorageV1().CSINodes().List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + informerCSINodes, err := nt.csiNodeInformer.Lister().List(labels.Everything()) + if len(informerCSINodes) != len(actualCSINodes.Items) { + return false, nil + } + if len(informerCSINodes) > 0 && !func() bool { + for _, actualCSINode := range actualCSINodes.Items { + for _, informerCSINode := range informerCSINodes { + if reflect.DeepEqual(actualCSINode, *informerCSINode) { + return true + } + } + } + return false + }() { + return false, nil + } + + return true, nil + }, ctx.Done()) + if err != nil { + return fmt.Errorf("get informers in sync: %v", err) + } + return nil +} + +func validate(t *testing.T, nt *nodeTopology, expectedAdded, expectedRemoved, expectedAll []*Segment) { + added, removed, called := addTestCallback(nt) + nt.sync(context.Background()) + expectedChanges := len(expectedAdded) > 0 || len(expectedRemoved) > 0 + if expectedChanges && !*called { + t.Error("change callback not invoked") + } + if !expectedChanges && *called { + t.Error("change callback invoked unexpectedly") + } + validateSegments(t, "added", added.Found(), expectedAdded) + validateSegments(t, "removed", removed.Found(), expectedRemoved) + validateSegments(t, "final", nt.List(), expectedAll) + + if t.Failed() { + t.FailNow() + } +} + +func validateSegments(t *testing.T, what string, actual, expected []*Segment) { + // We can just compare the string representation because that covers all + // relevant content of the segments and is readable. 
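+ // found records for each expected segment string whether it showed up
+ // among the actual segments; leftovers are reported as missing below.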
+ found := map[string]bool{} + for _, str := range segmentsToStrings(expected) { + found[str] = false + } + for _, str := range segmentsToStrings(actual) { + _, exists := found[str] + if !exists { + t.Errorf("unexpected %s segment: %s", what, str) + t.Fail() + continue + } + found[str] = true + } + for str, matched := range found { + if !matched { + t.Errorf("expected %s segment not found: %s", what, str) + t.Fail() + } + } +} + +func segmentsToStrings(segments []*Segment) []string { + str := []string{} + for _, segment := range segments { + str = append(str, segment.SimpleString()) + } + sort.Strings(str) + return str +} + +type testNode struct { + name string + driverKeys map[string][]string + labels map[string]string + skipNodeCreation, skipCSINodeCreation bool +} + +func makeNodes(nodes []testNode) []runtime.Object { + var objects []runtime.Object + + for _, node := range nodes { + if !node.skipNodeCreation { + objects = append(objects, &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: node.name, + Labels: node.labels, + }, + }) + } + if !node.skipCSINodeCreation { + csiNode := &storagev1.CSINode{ + ObjectMeta: metav1.ObjectMeta{ + Name: node.name, + }, + } + for driver, keys := range node.driverKeys { + csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, + storagev1.CSINodeDriver{ + Name: driver, + TopologyKeys: keys, + }) + } + objects = append(objects, csiNode) + } + } + return objects +} diff --git a/pkg/capacity/topology/topology.go b/pkg/capacity/topology/topology.go new file mode 100644 index 0000000000..cf9471878a --- /dev/null +++ b/pkg/capacity/topology/topology.go @@ -0,0 +1,137 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topology + +import ( + "context" + "fmt" + "sort" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Segment represents a topology segment. Entries are always sorted by +// key and keys are unique. In contrast to a map, segments therefore +// can be compared efficiently. A nil segment matches no nodes +// in a cluster, an empty segment all of them. +type Segment []SegmentEntry + +var _ sort.Interface = Segment{} + +// String returns the address *and* the content of the segment; the address +// is how the segment is identified when used as a hash key. +func (s *Segment) String() string { + return fmt.Sprintf("%p = %s", s, s.SimpleString()) +} + +// SimpleString only returns the content. +func (s *Segment) SimpleString() string { + var parts []string + for _, entry := range *s { + parts = append(parts, entry.String()) + } + return strings.Join(parts, "+ ") +} + +// Compare returns -1 if s is considered smaller than the other segment (less keys, +// keys and/or values smaller), 0 if equal and 1 otherwise. 
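+// For example (illustrative):
+//
+//   Segment{{"zone", "a"}}.Compare(Segment{{"zone", "b"}})              // -1 (smaller value)
+//   Segment{{"zone", "a"}}.Compare(Segment{{"zone", "a"}, {"x", "y"}})  // -1 (fewer entries)
+//   Segment{{"zone", "a"}}.Compare(Segment{{"zone", "a"}})              //  0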
+func (s Segment) Compare(other Segment) int { + if len(s) < len(other) { + return -1 + } + if len(s) > len(other) { + return 1 + } + for i := 0; i < len(s); i++ { + cmp := s[i].Compare(other[i]) + if cmp != 0 { + return cmp + } + } + return 0 +} + +func (s Segment) Len() int { return len(s) } +func (s Segment) Less(i, j int) bool { return s[i].Compare(s[j]) < 0 } +func (s Segment) Swap(i, j int) { + entry := s[i] + s[i] = s[j] + s[j] = entry +} + +// SegmentEntry represents one topology key/value pair. +type SegmentEntry struct { + Key, Value string +} + +func (se SegmentEntry) String() string { + return se.Key + ": " + se.Value +} + +// Compare returns -1 if se is considered smaller than the other segment entry (key or value smaller), +// 0 if equal and 1 otherwise. +func (se SegmentEntry) Compare(other SegmentEntry) int { + cmp := strings.Compare(se.Key, other.Key) + if cmp != 0 { + return cmp + } + return strings.Compare(se.Value, other.Value) +} + +// GetLabelSelector returns a LabelSelector with the key/value entries +// as label match criteria. +func (s Segment) GetLabelSelector() *metav1.LabelSelector { + return &metav1.LabelSelector{ + MatchLabels: s.GetLabelMap(), + } +} + +// GetLabelMap returns nil if the Segment itself is nil, +// otherwise a map with all key/value pairs. +func (s Segment) GetLabelMap() map[string]string { + if s == nil { + return nil + } + labels := map[string]string{} + for _, entry := range s { + labels[entry.Key] = entry.Value + } + return labels +} + +// Informer keeps a list of discovered topology segments and can +// notify one or more clients when it discovers changes. Segments +// are identified by their address and guaranteed to be unique. +type Informer interface { + // AddCallback ensures that the function is called each time + // changes to the list of segments are detected. It also gets + // called immediately when adding the callback and there are + // already some known segments. + AddCallback(cb Callback) + + // List returns all known segments, in no particular order. + List() []*Segment + + // Run starts watching for changes. + Run(ctx context.Context) + + // HasSynced returns true once all segments have been found. + HasSynced() bool +} + +type Callback func(added []*Segment, removed []*Segment) diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go new file mode 100644 index 0000000000..82a473bb14 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -0,0 +1,127 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rand provides utilities related to randomization. +package rand + +import ( + "math/rand" + "sync" + "time" +) + +var rng = struct { + sync.Mutex + rand *rand.Rand +}{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// Int returns a non-negative pseudo-random int. +func Int() int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Int() +} + +// Intn generates an integer in range [0,max). 
+// By design this should panic if input is invalid, <= 0. +func Intn(max int) int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Intn(max) +} + +// IntnRange generates an integer in range [min,max). +// By design this should panic if input is invalid, <= 0. +func IntnRange(min, max int) int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Intn(max-min) + min +} + +// IntnRange generates an int64 integer in range [min,max). +// By design this should panic if input is invalid, <= 0. +func Int63nRange(min, max int64) int64 { + rng.Lock() + defer rng.Unlock() + return rng.rand.Int63n(max-min) + min +} + +// Seed seeds the rng with the provided seed. +func Seed(seed int64) { + rng.Lock() + defer rng.Unlock() + + rng.rand = rand.New(rand.NewSource(seed)) +} + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) +// from the default Source. +func Perm(n int) []int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Perm(n) +} + +const ( + // We omit vowels from the set of available characters to reduce the chances + // of "bad words" being formed. + alphanums = "bcdfghjklmnpqrstvwxz2456789" + // No. of bits required to index into alphanums string. + alphanumsIdxBits = 5 + // Mask used to extract last alphanumsIdxBits of an int. + alphanumsIdxMask = 1<>= alphanumsIdxBits + remaining-- + } + return string(b) +} + +// SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and +// ensures that strings generated from hash functions appear consistent throughout the API. +func SafeEncodeString(s string) string { + r := make([]byte, len(s)) + for i, b := range []rune(s) { + r[i] = alphanums[(int(b) % len(alphanums))] + } + return string(r) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7ff9dabe01..09c0aa6cf5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -288,6 +288,7 @@ k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/strategicpatch From 35a200b9c3a45ec4da8dc64f9771bf922705595f Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 10 Aug 2020 15:04:46 +0200 Subject: [PATCH 2/8] capacity: walk up ownership chain Generic code from the controller runtime is used to retrieve unstructured objects. This is needed for deployment via Deployment, because the direct parent of the pod is then a ReplicaSet which itself will get deleted by the Deployment when rolling out changes. There are intentionally no unit tests for the feature because that would bring in even more additional dependencies. 
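For reference, the ownership walk itself boils down to following the
controlling owner reference the configured number of levels and using
whatever object that ends on as the owner of the CSIStorageCapacity
objects. The sketch below only illustrates that idea with the
controller-runtime unstructured client; the function name, error handling
and details are made up here and do not mirror the actual pkg/owner code.

    // Illustrative sketch, not the pkg/owner implementation: walk `levels`
    // controlling owner references up from the object identified by
    // gvk/namespace/name and return an owner reference to the final object.
    package ownersketch

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    	"k8s.io/client-go/rest"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    func lookupOwner(ctx context.Context, config *rest.Config, namespace, name string, gvk schema.GroupVersionKind, levels int) (*metav1.OwnerReference, error) {
    	c, err := client.New(config, client.Options{})
    	if err != nil {
    		return nil, err
    	}
    	for {
    		obj := &unstructured.Unstructured{}
    		obj.SetGroupVersionKind(gvk)
    		if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, obj); err != nil {
    			return nil, err
    		}
    		if levels == 0 {
    			// This object is the desired owner (the pod itself for level 0,
    			// a StatefulSet for level 1, a Deployment for level 2, ...).
    			return &metav1.OwnerReference{
    				APIVersion: gvk.GroupVersion().String(),
    				Kind:       gvk.Kind,
    				Name:       obj.GetName(),
    				UID:        obj.GetUID(),
    			}, nil
    		}
    		// Follow the controlling owner reference one level up.
    		var controller *metav1.OwnerReference
    		for _, ref := range obj.GetOwnerReferences() {
    			if ref.Controller != nil && *ref.Controller {
    				r := ref
    				controller = &r
    				break
    			}
    		}
    		if controller == nil {
    			return nil, fmt.Errorf("%s %s/%s has no controlling owner", gvk.Kind, namespace, name)
    		}
    		gvk = schema.FromAPIVersionAndKind(controller.APIVersion, controller.Kind)
    		name = controller.Name
    		levels--
    	}
    }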
--- README.md | 10 +- cmd/csi-provisioner/csi-provisioner.go | 26 +- deploy/kubernetes/rbac.yaml | 9 +- deploy/kubernetes/storage-capacity.yaml | 57 ++ go.mod | 1 + go.sum | 23 + pkg/owner/owner.go | 99 +++ vendor/github.com/hashicorp/golang-lru/go.mod | 2 + vendor/github.com/hashicorp/golang-lru/lru.go | 48 +- .../hashicorp/golang-lru/simplelru/lru.go | 16 + .../golang-lru/simplelru/lru_interface.go | 7 +- .../x/net/http2/client_conn_pool.go | 8 +- vendor/golang.org/x/net/http2/flow.go | 2 + .../golang.org/x/net/http2/hpack/huffman.go | 7 + vendor/golang.org/x/net/http2/http2.go | 7 + vendor/golang.org/x/net/http2/server.go | 8 +- vendor/golang.org/x/net/http2/transport.go | 12 +- vendor/golang.org/x/net/ipv4/header.go | 5 +- vendor/gopkg.in/yaml.v2/apic.go | 1 + .../restmapper/category_expansion.go | 119 +++ .../k8s.io/client-go/restmapper/discovery.go | 338 ++++++++ .../k8s.io/client-go/restmapper/shortcut.go | 172 +++++ vendor/modules.txt | 10 +- vendor/sigs.k8s.io/controller-runtime/LICENSE | 201 +++++ .../pkg/client/apiutil/apimachinery.go | 97 +++ .../pkg/client/apiutil/dynamicrestmapper.go | 323 ++++++++ .../controller-runtime/pkg/client/client.go | 208 +++++ .../pkg/client/client_cache.go | 140 ++++ .../controller-runtime/pkg/client/codec.go | 24 + .../controller-runtime/pkg/client/doc.go | 49 ++ .../controller-runtime/pkg/client/dryrun.go | 95 +++ .../pkg/client/interfaces.go | 135 ++++ .../controller-runtime/pkg/client/options.go | 720 ++++++++++++++++++ .../controller-runtime/pkg/client/patch.go | 193 +++++ .../controller-runtime/pkg/client/split.go | 61 ++ .../pkg/client/typed_client.go | 201 +++++ .../pkg/client/unstructured_client.go | 273 +++++++ 37 files changed, 3657 insertions(+), 50 deletions(-) create mode 100644 deploy/kubernetes/storage-capacity.yaml create mode 100644 pkg/owner/owner.go create mode 100644 vendor/k8s.io/client-go/restmapper/category_expansion.go create mode 100644 vendor/k8s.io/client-go/restmapper/discovery.go create mode 100644 vendor/k8s.io/client-go/restmapper/shortcut.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/LICENSE create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go diff --git a/README.md b/README.md index 06f9c39ba4..3047bf2150 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,7 @@ See the [storage capacity section](#capacity-support) below for details. * `--enable-capacity `: Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. 
Currently supported: `--enable-capacity=central`. +* `--capacity-ownerref-level `: The level indicates the number of objects that need to be traversed starting from the pod identified by the POD_NAME and POD_NAMESPACE environment variables to reach the owning object for CSIStorageCapacity objects: 0 for the pod itself, 1 for a StatefulSet, 2 for a Deployment, etc. Defaults to `1` (= StatefulSet). #### Other recognized arguments * `--feature-gates `: A set of comma separated `=` pairs that describe feature gates for alpha/experimental features. See [list of features](#feature-status) or `--help` output for list of recognized features. Example: `--feature-gates Topology=true` to enable Topology feature that's disabled by default. @@ -146,6 +147,9 @@ To enable this feature in a driver deployment: Kubernetes scheduler will ignore it. This can be used to first deploy the driver without that flag, then when sufficient information has been published, enabled the scheduler usage of it. +- If external-provisioner is not deployed with a StatefulSet, then + configure with `--capacity-ownerref-level` which object is meant to own + CSIStorageCapacity objects. - Optional: configure how often external-provisioner polls the driver to detect changed capacity with `--capacity-poll-interval`. - Optional: configure how many worker threads are used in parallel @@ -183,9 +187,9 @@ To ensure that CSIStorageCapacity objects get removed when the external-provisioner gets removed from the cluster, they all have an owner and therefore get garbage-collected when that owner disappears. The owner is not the external-provisioner pod itself but -rather its parent. This way, it is possible to switch between -external-provisioner instances without losing the already gathered -information. +rather one of its parents as specified by `--capacity-ownerref-level`. +This way, it is possible to switch between external-provisioner +instances without losing the already gathered information. CSIStorageCapacity objects are namespaced and get created in the namespace of the external-provisioner. Only CSIStorageCapacity objects diff --git a/cmd/csi-provisioner/csi-provisioner.go b/cmd/csi-provisioner/csi-provisioner.go index 5a73164a7e..24b54ef22e 100644 --- a/cmd/csi-provisioner/csi-provisioner.go +++ b/cmd/csi-provisioner/csi-provisioner.go @@ -28,7 +28,7 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" flag "github.com/spf13/pflag" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" @@ -48,6 +48,7 @@ import ( "github.com/kubernetes-csi/external-provisioner/pkg/capacity" "github.com/kubernetes-csi/external-provisioner/pkg/capacity/topology" ctrl "github.com/kubernetes-csi/external-provisioner/pkg/controller" + "github.com/kubernetes-csi/external-provisioner/pkg/owner" snapclientset "github.com/kubernetes-csi/external-snapshotter/v2/pkg/client/clientset/versioned" ) @@ -86,7 +87,8 @@ var ( flag.Var(capacity, "enable-capacity", "Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. 
Currently supported: --enable-capacity=central.") return capacity }() - capacityPollInterval = flag.Duration("capacity-poll-interval", time.Minute, "How long the external-provisioner waits before checking for storage capacity changes.") + capacityPollInterval = flag.Duration("capacity-poll-interval", time.Minute, "How long the external-provisioner waits before checking for storage capacity changes.") + capacityOwnerrefLevel = flag.Int("capacity-ownerref-level", 1, "The level indicates the number of objects that need to be traversed starting from the pod identified by the POD_NAME and POD_NAMESPACE environment variables to reach the owning object for CSIStorageCapacity objects: 0 for the pod itself, 1 for a StatefulSet, 2 for a Deployment, etc.") featureGates map[string]bool provisionController *controller.ProvisionController @@ -286,20 +288,16 @@ func main() { if podName == "" || namespace == "" { klog.Fatalf("need POD_NAMESPACE/POD_NAME env variables, have only POD_NAMESPACE=%q and POD_NAME=%q", namespace, podName) } - pod, err := clientset.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{}) + controller, err := owner.Lookup(config, namespace, podName, + schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }, *capacityOwnerrefLevel) if err != nil { - klog.Fatalf("error getting our own pod: %v", err) - } - var controller *metav1.OwnerReference - for _, owner := range pod.OwnerReferences { - if owner.Controller != nil && *owner.Controller { - controller = &owner - break - } - } - if controller == nil { - klog.Fatal("pod does not have a controller which owns it") + klog.Fatalf("look up owner(s) of pod: %v", err) } + klog.Infof("using %s/%s %s as owner of CSIStorageCapacity objects", controller.APIVersion, controller.Kind, controller.Name) topologyInformer := topology.NewNodeTopology( provisionerName, diff --git a/deploy/kubernetes/rbac.yaml b/deploy/kubernetes/rbac.yaml index 6e8cdcfcdf..48450f89ac 100644 --- a/deploy/kubernetes/rbac.yaml +++ b/deploy/kubernetes/rbac.yaml @@ -92,9 +92,16 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["csistoragecapacities"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +# The GET permissions below are needed for walking up the ownership chain +# for CSIStorageCapacity. They are sufficient for deployment via +# StatefulSet (only needs to get Pod) and Deployment (needs to get +# Pod and then ReplicaSet to find the Deployment). - apiGroups: [""] resources: ["pods"] - verbs: ["get"] # Needed for CSIStorageCapacity owner determination. + verbs: ["get"] +- apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get"] --- kind: RoleBinding diff --git a/deploy/kubernetes/storage-capacity.yaml b/deploy/kubernetes/storage-capacity.yaml new file mode 100644 index 0000000000..8161eb17d3 --- /dev/null +++ b/deploy/kubernetes/storage-capacity.yaml @@ -0,0 +1,57 @@ +# This YAML file demonstrates how to enable the +# storage capacity feature when deploying the +# external provisioner, in this example together +# with the mock CSI driver. +# +# It depends on the RBAC definitions from rbac.yaml. 
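+# Because external-provisioner runs as a Deployment in this example, the
+# owner chain of its pod is Pod -> ReplicaSet -> Deployment, which is why
+# the arguments below include --capacity-ownerref-level=2: the Deployment
+# becomes the owner of the CSIStorageCapacity objects.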
+--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-provisioner +spec: + replicas: 3 + selector: + matchLabels: + app: csi-provisioner + template: + metadata: + labels: + app: csi-provisioner + spec: + serviceAccount: csi-provisioner + containers: + - name: csi-provisioner + image: k8s.gcr.io/sig-storage/csi-provisioner:v2.0.0 + args: + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--enable-capacity=central" + - "--capacity-ownerref-level=2" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/mock.socket + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + - name: mock-driver + image: quay.io/k8scsi/mock-driver:canary + env: + - name: CSI_ENDPOINT + value: /var/lib/csi/sockets/pluginproxy/mock.socket + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + volumes: + - name: socket-dir + emptyDir: diff --git a/go.mod b/go.mod index fb7a96a119..eb34df48a3 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( k8s.io/csi-translation-lib v0.19.0-rc.2 k8s.io/klog v1.0.0 k8s.io/kubernetes v1.19.0-rc.2 + sigs.k8s.io/controller-runtime v0.6.2 sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0-rc1 ) diff --git a/go.sum b/go.sum index 2f1fd5f7f9..47d37ca3db 100644 --- a/go.sum +++ b/go.sum @@ -181,6 +181,8 @@ github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -288,6 +290,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= @@ -311,6 +314,8 @@ github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= 
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= @@ -419,6 +424,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -429,12 +436,16 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -482,6 +493,7 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod 
h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -638,6 +650,8 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -672,6 +686,7 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -740,6 +755,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= @@ -827,6 +844,8 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -836,6 +855,7 @@ honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.19.0-rc.2 h1:Lq0owhvgpWXmMtz+t2AT/JJpIAPX9X8lK3oE2qslYCU= k8s.io/api v0.19.0-rc.2/go.mod h1:9nHeM2gbqeaL7yN6UFvOxKzLG5gZ4v+DJ6bpavDetZo= +k8s.io/apiextensions-apiserver v0.19.0-rc.2 h1:K57jvXQhrmyr58vEBWlO2eaTpDdtTOOnSIL2cnDc9Oc= k8s.io/apiextensions-apiserver v0.19.0-rc.2/go.mod h1:LkNk/VUFXmwgURxOOQz3FJEjX/Ls0bwkq5/LIGTipIM= k8s.io/apimachinery v0.19.0-rc.2 h1:JScnJRuwKHT8RmdrsFMkE4Oi+SVI/QIWFGOOhNZJe/M= k8s.io/apimachinery v0.19.0-rc.2/go.mod h1:eHbWZVMaaewmYBAUuRYnAmTTMtDhvpPNZuh8/6Yl7v0= @@ -878,6 +898,7 @@ k8s.io/metrics v0.19.0-rc.2/go.mod h1:wtTMGMCxx0brO15Nf1KqwuDjSmH3QoyX6gx8FPnmi4 k8s.io/sample-apiserver v0.19.0-rc.2/go.mod h1:ujEUq5dgRk6COe/8PIP0YM9HrJrjsqh73+UCxQxjZPw= k8s.io/system-validators v1.1.2/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200720150651-0bdb4ca86cbc h1:GiXZzevctVRRBh56shqcqB9s9ReWMU6GTsFyE2RCFJQ= k8s.io/utils v0.0.0-20200720150651-0bdb4ca86cbc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= @@ -891,6 +912,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/controller-runtime v0.6.2 h1:jkAnfdTYBpFwlmBn3pS5HFO06SfxvnTZ1p5PeEF/zAA= +sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0-rc1 h1:n7bIUaBsWmTUHqwJatYiNa2ZspjeQyzZwxfE4D4G4zQ= sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0-rc1/go.mod h1:N+Ctyyr/Vwp8WkAG6DjpxcG0yWPlKSTj24RjzWzBSME= diff --git a/pkg/owner/owner.go b/pkg/owner/owner.go new file mode 100644 index 0000000000..5782ce31f3 --- /dev/null +++ b/pkg/owner/owner.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package owner contains code for walking up the ownership chain, +// starting with an arbitrary object. RBAC rules must allow GET access +// to each object on the chain, at least including the starting +// object, more when walking up more than one level. 
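
For context, a minimal standalone sketch (not part of this patch) of how the Lookup helper defined below can be invoked, assuming in-cluster execution and the POD_NAMESPACE/POD_NAME environment variables that the deployment examples set via the downward API:

package main

import (
	"fmt"
	"os"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"

	"github.com/kubernetes-csi/external-provisioner/pkg/owner"
)

func main() {
	// Assumes in-cluster execution with POD_NAMESPACE/POD_NAME set,
	// as in deploy/kubernetes/storage-capacity.yaml.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	// Walk up two ownership links (Pod -> ReplicaSet -> Deployment), i.e. the
	// equivalent of --capacity-ownerref-level=2 for a Deployment-based provisioner.
	ref, err := owner.Lookup(config, os.Getenv("POD_NAMESPACE"), os.Getenv("POD_NAME"),
		schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, 2)
	if err != nil {
		panic(err)
	}
	fmt.Printf("owner of CSIStorageCapacity objects: %s/%s %s\n", ref.APIVersion, ref.Kind, ref.Name)
}

In csi-provisioner.go the same call is made with the value of the --capacity-ownerref-level flag instead of a hard-coded level.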
+package owner + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Lookup walks up the ownership chain zero or more levels and returns an OwnerReference for the +// object. The object identified by name, namespace and type is the starting point and is +// returned when levels is zero. Only APIVersion, Kind, Name, and UID will be set. +// IsController is always true. +func Lookup(config *rest.Config, namespace, name string, gkv schema.GroupVersionKind, levels int) (*metav1.OwnerReference, error) { + c, err := client.New(config, client.Options{}) + if err != nil { + return nil, fmt.Errorf("build client: %v", err) + } + + return lookupRecursive(c, namespace, name, gkv.Group, gkv.Version, gkv.Kind, levels) +} + +func lookupRecursive(c client.Client, namespace, name, group, version, kind string, levels int) (*metav1.OwnerReference, error) { + u := &unstructured.Unstructured{} + apiVersion := metav1.GroupVersion{Group: group, Version: version}.String() + u.SetAPIVersion(apiVersion) + u.SetKind(kind) + + if err := c.Get(context.Background(), client.ObjectKey{ + Namespace: namespace, + Name: name, + }, u); err != nil { + return nil, fmt.Errorf("get object: %v", err) + } + + if levels == 0 { + isTrue := true + return &metav1.OwnerReference{ + APIVersion: apiVersion, + Kind: kind, + Name: name, + UID: u.GetUID(), + Controller: &isTrue, + }, nil + } + owners := u.GetOwnerReferences() + for _, owner := range owners { + if owner.Controller != nil && *owner.Controller { + gv, err := schema.ParseGroupVersion(owner.APIVersion) + if err != nil { + return nil, fmt.Errorf("parse OwnerReference.APIVersion: %v", err) + } + // With this special case here we avoid one lookup and thus the need for + // RBAC GET permission for the parent. For example, when a Pod is controlled + // by a StatefulSet, we only need GET permission for Pods (for the c.Get above) + // but not for StatefulSets. + if levels == 1 { + isTrue := true + return &metav1.OwnerReference{ + APIVersion: owner.APIVersion, + Kind: owner.Kind, + Name: owner.Name, + UID: owner.UID, + Controller: &isTrue, + }, nil + } + + return lookupRecursive(c, namespace, owner.Name, + gv.Group, gv.Version, owner.Kind, + levels-1) + } + } + return nil, fmt.Errorf("%s/%s %q in namespace %q has no controlling owner, cannot unwind the ownership further", + apiVersion, kind, name, namespace) +} diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod index 824cb97e83..8ad8826b36 100644 --- a/vendor/github.com/hashicorp/golang-lru/go.mod +++ b/vendor/github.com/hashicorp/golang-lru/go.mod @@ -1 +1,3 @@ module github.com/hashicorp/golang-lru + +go 1.12 diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go index 1cbe04b7d0..4e5e9d8fd0 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -37,7 +37,7 @@ func (c *Cache) Purge() { c.lock.Unlock() } -// Add adds a value to the cache. Returns true if an eviction occurred. +// Add adds a value to the cache. Returns true if an eviction occurred. 
func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() evicted = c.lru.Add(key, value) @@ -71,8 +71,8 @@ func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { return value, ok } -// ContainsOrAdd checks if a key is in the cache without updating the -// recent-ness or deleting it for being stale, and if not, adds the value. +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() @@ -85,18 +85,52 @@ func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { return false, evicted } +// PeekOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + previous, ok = c.lru.Peek(key) + if ok { + return previous, true, false + } + + evicted = c.lru.Add(key, value) + return nil, false, evicted +} + // Remove removes the provided key from the cache. -func (c *Cache) Remove(key interface{}) { +func (c *Cache) Remove(key interface{}) (present bool) { c.lock.Lock() - c.lru.Remove(key) + present = c.lru.Remove(key) c.lock.Unlock() + return +} + +// Resize changes the cache size. +func (c *Cache) Resize(size int) (evicted int) { + c.lock.Lock() + evicted = c.lru.Resize(size) + c.lock.Unlock() + return evicted } // RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { +func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) { + c.lock.Lock() + key, value, ok = c.lru.RemoveOldest() + c.lock.Unlock() + return +} + +// GetOldest returns the oldest entry +func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) { c.lock.Lock() - c.lru.RemoveOldest() + key, value, ok = c.lru.GetOldest() c.lock.Unlock() + return } // Keys returns a slice of the keys in the cache, from oldest to newest. diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index 5673773b22..a86c8539e0 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -73,6 +73,9 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) { func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) + if ent.Value.(*entry) == nil { + return nil, false + } return ent.Value.(*entry).value, true } return @@ -142,6 +145,19 @@ func (c *LRU) Len() int { return c.evictList.Len() } +// Resize changes the cache size. +func (c *LRU) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + // removeOldest removes the oldest item from the cache. 
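
The vendored golang-lru changes around here come from the bump to v0.5.4, which adds Resize, GetOldest, PeekOrAdd, and value-returning Remove/RemoveOldest. A small illustrative sketch of that new surface (not used directly by external-provisioner):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New(128)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 200; i++ {
		cache.Add(i, i*i)
	}

	// Resize (new in v0.5.4) shrinks the cache and reports how many entries
	// had to be evicted to honor the new size.
	fmt.Println("evicted by Resize:", cache.Resize(50), "len:", cache.Len())

	// GetOldest (also new) looks at the LRU end without removing it.
	if key, value, ok := cache.GetOldest(); ok {
		fmt.Println("oldest entry:", key, "->", value)
	}

	// Remove now reports whether the key was actually present.
	fmt.Println("removed 42:", cache.Remove(42))
}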
func (c *LRU) removeOldest() { ent := c.evictList.Back() diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go index 74c7077440..92d70934d6 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -10,7 +10,7 @@ type LRUCache interface { // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) - // Check if a key exsists in cache without updating the recent-ness. + // Checks if a key exists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. @@ -31,6 +31,9 @@ type LRUCache interface { // Returns the number of items in the cache. Len() int - // Clear all cache entries + // Clears all cache entries. Purge() + + // Resizes cache, returning number evicted + Resize(int) int } diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index f4d9b5ece3..3a67636fe2 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -107,6 +107,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis // dialCall is an in-flight Transport dial call to a host. type dialCall struct { + _ incomparable p *clientConnPool done chan struct{} // closed when done res *ClientConn // valid after done is closed @@ -180,6 +181,7 @@ func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) } type addConnCall struct { + _ incomparable p *clientConnPool done chan struct{} // closed when done err error @@ -200,12 +202,6 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { close(c.done) } -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - // p.mu must be held func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { for _, v := range p.conns[key] { diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index cea601fcdf..b51f0e0cf1 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -8,6 +8,8 @@ package http2 // flow is the flow control window's size. type flow struct { + _ incomparable + // n is the number of DATA bytes we're allowed to send. // A flow is kept both on a conn and a per-stream. n int32 diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index b412a96c50..a1ab2f0567 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -105,7 +105,14 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { return nil } +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). 
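
The incomparable marker being added to these x/net/http2 structs is a zero-width array of a non-comparable element type: it contributes no data, but it makes == on the enclosing struct a compile-time error. An illustrative standalone example (all names here are made up):

package main

import "fmt"

// incomparable mirrors the zero-width helper type used by the vendored http2 code.
type incomparable [0]func()

type plainKey struct{ name string }

type guardedKey struct {
	_    incomparable // zero bytes of data, but func elements are not comparable
	name string
}

func main() {
	fmt.Println(plainKey{"a"} == plainKey{"a"}) // true: structs of comparable fields support ==

	k := guardedKey{name: "a"}
	fmt.Println(k.name)
	// The following would be rejected at compile time, which is the whole point:
	//   _ = guardedKey{name: "a"} == guardedKey{name: "a"}
	// invalid operation: struct containing incomparable cannot be compared
}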
+type incomparable [0]func() + type node struct { + _ incomparable + // children is non-nil for internal nodes children *[256]*node diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 27cc893cc0..5571ccfd26 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -241,6 +241,7 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { + _ incomparable w io.Writer // immutable bw *bufio.Writer // non-nil when data is buffered } @@ -313,6 +314,7 @@ func bodyAllowedForStatus(status int) bool { } type httpError struct { + _ incomparable msg string timeout bool } @@ -376,3 +378,8 @@ func (s *sorter) SortStrings(ss []string) { func validPseudoPath(v string) bool { return (len(v) > 0 && v[0] == '/') || v == "*" } + +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index bc9e41a1b7..345b7cd85d 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -761,6 +761,7 @@ func (sc *serverConn) readFrames() { // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. type frameWriteResult struct { + _ incomparable wr FrameWriteRequest // what was written (or attempted) err error // result of the writeFrame call } @@ -771,7 +772,7 @@ type frameWriteResult struct { // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} + sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err} } func (sc *serverConn) closeAllStreamsOnConnClose() { @@ -1161,7 +1162,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { if wr.write.staysWithinBuffer(sc.bw.Available()) { sc.writingFrameAsync = false err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) + sc.wroteFrame(frameWriteResult{wr: wr, err: err}) } else { sc.writingFrameAsync = true go sc.writeFrameAsync(wr) @@ -2057,7 +2058,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var trailer http.Header for _, v := range rp.header["Trailer"] { for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + key = http.CanonicalHeaderKey(textproto.TrimString(key)) switch key { case "Transfer-Encoding", "Trailer", "Content-Length": // Bogus. (copy of http1 rules) @@ -2275,6 +2276,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { // requestBody is the Handler's Request.Body type. // Read and Close may be called concurrently. 
type requestBody struct { + _ incomparable stream *stream conn *serverConn closed bool // for use by Close only diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index e4fb02530f..54acc1e360 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -916,7 +916,7 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { k = http.CanonicalHeaderKey(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} + return "", fmt.Errorf("invalid Trailer key %q", k) } keys = append(keys, k) } @@ -1394,13 +1394,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - // requires cc.mu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -1616,6 +1609,7 @@ func (cc *ClientConn) writeHeader(name, value string) { } type resAndError struct { + _ incomparable res *http.Response err error } @@ -1663,6 +1657,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { + _ incomparable cc *ClientConn closeWhenIdle bool } @@ -2479,6 +2474,7 @@ func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { // gzipReader wraps a response body so it can lazily // call gzip.NewReader on the first call to Read type gzipReader struct { + _ incomparable body io.ReadCloser // underlying Response.Body zr *gzip.Reader // lazily-initialized gzip reader zerr error // sticky error diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go index 701bd4b22d..c271ca46cb 100644 --- a/vendor/golang.org/x/net/ipv4/header.go +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -14,9 +14,8 @@ import ( ) const ( - Version = 4 // protocol version - HeaderLen = 20 // header length without extension headers - maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers ) type HeaderFlags int diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index 1f7e87e672..d2c2308f1f 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -86,6 +86,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, } } diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go new file mode 100644 index 0000000000..2537a2b4e2 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// CategoryExpander maps category strings to GroupResources. +// Categories are classification or 'tag' of a group of resources. +type CategoryExpander interface { + Expand(category string) ([]schema.GroupResource, bool) +} + +// SimpleCategoryExpander implements CategoryExpander interface +// using a static mapping of categories to GroupResource mapping. +type SimpleCategoryExpander struct { + Expansions map[string][]schema.GroupResource +} + +// Expand fulfills CategoryExpander +func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret, ok := e.Expansions[category] + return ret, ok +} + +// discoveryCategoryExpander struct lets a REST Client wrapper (discoveryClient) to retrieve list of APIResourceList, +// and then convert to fallbackExpander +type discoveryCategoryExpander struct { + discoveryClient discovery.DiscoveryInterface +} + +// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from +// the API, found through the discovery client. In case of any error or no category found (which likely +// means we're at a cluster prior to categories support, fallback to the expander provided. +func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander { + if client == nil { + panic("Please provide discovery client to shortcut expander") + } + return discoveryCategoryExpander{discoveryClient: client} +} + +// Expand fulfills CategoryExpander +func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + // Get all supported resources for groups and versions from server, if no resource found, fallback anyway. + apiResourceLists, _ := e.discoveryClient.ServerResources() + if len(apiResourceLists) == 0 { + return nil, false + } + + discoveredExpansions := map[string][]schema.GroupResource{} + for _, apiResourceList := range apiResourceLists { + gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + if err != nil { + continue + } + // Collect GroupVersions by categories + for _, apiResource := range apiResourceList.APIResources { + if categories := apiResource.Categories; len(categories) > 0 { + for _, category := range categories { + groupResource := schema.GroupResource{ + Group: gv.Group, + Resource: apiResource.Name, + } + discoveredExpansions[category] = append(discoveredExpansions[category], groupResource) + } + } + } + } + + ret, ok := discoveredExpansions[category] + return ret, ok +} + +// UnionCategoryExpander implements CategoryExpander interface. +// It maps given category string to union of expansions returned by all the CategoryExpanders in the list. +type UnionCategoryExpander []CategoryExpander + +// Expand fulfills CategoryExpander +func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret := []schema.GroupResource{} + ok := false + + // Expand the category for each CategoryExpander in the list and merge/combine the results. 
+ for _, expansion := range u { + curr, currOk := expansion.Expand(category) + + for _, currGR := range curr { + found := false + for _, existing := range ret { + if existing == currGR { + found = true + break + } + } + if !found { + ret = append(ret, currGR) + } + } + ok = ok || currOk + } + + return ret, ok +} diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go new file mode 100644 index 0000000000..19ae95e1b5 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/discovery.go @@ -0,0 +1,338 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + + "k8s.io/klog/v2" +) + +// APIGroupResources is an API group with a mapping of versions to +// resources. +type APIGroupResources struct { + Group metav1.APIGroup + // A mapping of version string to a slice of APIResources for + // that version. + VersionedResources map[string][]metav1.APIResource +} + +// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered +// groups and resources passed in. +func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + + var groupPriority []string + // /v1 is special. 
It should always come first + resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} + kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} + + for _, group := range groupResources { + groupPriority = append(groupPriority, group.Group.Name) + + // Make sure the preferred version comes first + if len(group.Group.PreferredVersion.Version) != 0 { + preferred := group.Group.PreferredVersion.Version + if _, ok := group.VersionedResources[preferred]; ok { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Kind: meta.AnyKind, + }) + } + } + + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions + if discoveryVersion.Version != group.Group.PreferredVersion.Version { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Kind: meta.AnyKind, + }) + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}) + + for _, resource := range resources { + scope := meta.RESTScopeNamespace + if !resource.Namespaced { + scope = meta.RESTScopeRoot + } + + // if we have a slash, then this is a subresource and we shouldn't create mappings for those. + if strings.Contains(resource.Name, "/") { + continue + } + + plural := gv.WithResource(resource.Name) + singular := gv.WithResource(resource.SingularName) + // this is for legacy resources and servers which don't list singular forms. For those we must still guess. + if len(resource.SingularName) == 0 { + _, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind)) + } + + versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope) + versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope) + // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior + versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + } + // TODO why is this type not in discovery (at least for "v1") + versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) + unionMapper = append(unionMapper, versionMapper) + } + } + + for _, group := range groupPriority { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group, + Version: meta.AnyVersion, + Resource: meta.AnyResource, + }) + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group, + Version: meta.AnyVersion, + Kind: meta.AnyKind, + }) + } + + return meta.PriorityRESTMapper{ + Delegate: unionMapper, + ResourcePriority: resourcePriority, + KindPriority: kindPriority, + } +} + +// GetAPIGroupResources uses the provided discovery client to gather +// discovery information and populate a slice of APIGroupResources. 
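
k8s.io/client-go/restmapper is newly vendored because the controller-runtime client used by pkg/owner resolves REST mappings through discovery. A rough standalone sketch of that flow, combining NewDiscoveryRESTMapper (above) with GetAPIGroupResources (below) and assuming a kubeconfig in the default location:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig in the default location; in-cluster code would
	// use rest.InClusterConfig() instead.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}

	groupResources, err := restmapper.GetAPIGroupResources(dc)
	if err != nil {
		panic(err)
	}
	mapper := restmapper.NewDiscoveryRESTMapper(groupResources)

	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "ReplicaSet"})
	if err != nil {
		panic(err)
	}
	fmt.Println(mapping.Resource) // e.g. apps/v1, Resource=replicasets
}

The shortcutExpander added further below layers short-name expansion (for example, "deploy" to "deployments") on top of the same delegate mapper.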
+func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) { + gs, rs, err := cl.ServerGroupsAndResources() + if rs == nil || gs == nil { + return nil, err + // TODO track the errors and update callers to handle partial errors. + } + rsm := map[string]*metav1.APIResourceList{} + for _, r := range rs { + rsm[r.GroupVersion] = r + } + + var result []*APIGroupResources + for _, group := range gs { + groupResources := &APIGroupResources{ + Group: *group, + VersionedResources: make(map[string][]metav1.APIResource), + } + for _, version := range group.Versions { + resources, ok := rsm[version.GroupVersion] + if !ok { + continue + } + groupResources.VersionedResources[version.Version] = resources.APIResources + } + result = append(result, groupResources) + } + return result, nil +} + +// DeferredDiscoveryRESTMapper is a RESTMapper that will defer +// initialization of the RESTMapper until the first mapping is +// requested. +type DeferredDiscoveryRESTMapper struct { + initMu sync.Mutex + delegate meta.RESTMapper + cl discovery.CachedDiscoveryInterface +} + +// NewDeferredDiscoveryRESTMapper returns a +// DeferredDiscoveryRESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper { + return &DeferredDiscoveryRESTMapper{ + cl: cl, + } +} + +func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { + d.initMu.Lock() + defer d.initMu.Unlock() + + if d.delegate != nil { + return d.delegate, nil + } + + groupResources, err := GetAPIGroupResources(d.cl) + if err != nil { + return nil, err + } + + d.delegate = NewDiscoveryRESTMapper(groupResources) + return d.delegate, err +} + +// Reset resets the internally cached Discovery information and will +// cause the next mapping request to re-discover. +func (d *DeferredDiscoveryRESTMapper) Reset() { + klog.V(5).Info("Invalidating discovery information") + + d.initMu.Lock() + defer d.initMu.Unlock() + + d.cl.Invalidate() + d.delegate = nil +} + +// KindFor takes a partial resource and returns back the single match. +// It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, err = del.KindFor(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvk, err = d.KindFor(resource) + } + return +} + +// KindsFor takes a partial resource and returns back the list of +// potential kinds in priority order. +func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvks, err = del.KindsFor(resource) + if len(gvks) == 0 && !d.cl.Fresh() { + d.Reset() + gvks, err = d.KindsFor(resource) + } + return +} + +// ResourceFor takes a partial resource and returns back the single +// match. It returns an error if there are multiple matches. 
+func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, err = del.ResourceFor(input) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvr, err = d.ResourceFor(input) + } + return +} + +// ResourcesFor takes a partial resource and returns back the list of +// potential resource in priority order. +func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvrs, err = del.ResourcesFor(input) + if len(gvrs) == 0 && !d.cl.Fresh() { + d.Reset() + gvrs, err = d.ResourcesFor(input) + } + return +} + +// RESTMapping identifies a preferred resource mapping for the +// provided group kind. +func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + m, err = del.RESTMapping(gk, versions...) + if err != nil && !d.cl.Fresh() { + d.Reset() + m, err = d.RESTMapping(gk, versions...) + } + return +} + +// RESTMappings returns the RESTMappings for the provided group kind +// in a rough internal preferred order. If no kind is found, it will +// return a NoResourceMatchError. +func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + ms, err = del.RESTMappings(gk, versions...) + if len(ms) == 0 && !d.cl.Fresh() { + d.Reset() + ms, err = d.RESTMappings(gk, versions...) + } + return +} + +// ResourceSingularizer converts a resource name from plural to +// singular (e.g., from pods to pod). +func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + del, err := d.getDelegate() + if err != nil { + return resource, err + } + singular, err = del.ResourceSingularizer(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + singular, err = d.ResourceSingularizer(resource) + } + return +} + +func (d *DeferredDiscoveryRESTMapper) String() string { + del, err := d.getDelegate() + if err != nil { + return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) + } + return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) +} + +// Make sure it satisfies the interface +var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go new file mode 100644 index 0000000000..6903ec8088 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -0,0 +1,172 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package restmapper + +import ( + "strings" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. It expands the resource first, then invokes the wrapped +type shortcutExpander struct { + RESTMapper meta.RESTMapper + + discoveryClient discovery.DiscoveryInterface +} + +var _ meta.RESTMapper = &shortcutExpander{} + +// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client} +} + +// KindFor fulfills meta.RESTMapper +func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) +} + +// KindsFor fulfills meta.RESTMapper +func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource)) +} + +// ResourcesFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource)) +} + +// ResourceFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource)) +} + +// ResourceSingularizer fulfills meta.RESTMapper +func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) { + return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource) +} + +// RESTMapping fulfills meta.RESTMapper +func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return e.RESTMapper.RESTMapping(gk, versions...) +} + +// RESTMappings fulfills meta.RESTMapper +func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + return e.RESTMapper.RESTMappings(gk, versions...) +} + +// getShortcutMappings returns a set of tuples which holds short names for resources. +// First the list of potential resources will be taken from the API server. +// Next we will append the hardcoded list of resources - to be backward compatible with old servers. +// NOTE that the list is ordered by group priority. +func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) { + res := []resourceShortcuts{} + // get server resources + // This can return an error *and* the results it was able to find. We don't need to fail on the error. 
+ apiResList, err := e.discoveryClient.ServerResources() + if err != nil { + klog.V(1).Infof("Error loading discovery information: %v", err) + } + for _, apiResources := range apiResList { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + continue + } + for _, apiRes := range apiResources.APIResources { + for _, shortName := range apiRes.ShortNames { + rs := resourceShortcuts{ + ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, + LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, + } + res = append(res, rs) + } + } + } + + return apiResList, res, nil +} + +// expandResourceShortcut will return the expanded version of resource +// (something that a pkg/api/meta.RESTMapper can understand), if it is +// indeed a shortcut. If no match has been found, we will match on group prefixing. +// Lastly we will return resource unmodified. +func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource { + // get the shortcut mappings and return on first match. + if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil { + // avoid expanding if there's an exact match to a full resource name + for _, apiResources := range allResources { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + continue + } + if len(resource.Group) != 0 && resource.Group != gv.Group { + continue + } + for _, apiRes := range apiResources.APIResources { + if resource.Resource == apiRes.Name { + return resource + } + if resource.Resource == apiRes.SingularName { + return resource + } + } + } + + for _, item := range shortcutResources { + if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + + // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling + if len(resource.Group) == 0 { + return resource + } + for _, item := range shortcutResources { + if !strings.HasPrefix(item.ShortForm.Group, resource.Group) { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + } + + return resource +} + +// ResourceShortcuts represents a structure that holds the information how to +// transition from resource's shortcut to its full name. 
+type resourceShortcuts struct { + ShortForm schema.GroupResource + LongForm schema.GroupResource +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 09c0aa6cf5..88d921be01 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -42,7 +42,7 @@ github.com/google/uuid github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions github.com/googleapis/gnostic/openapiv2 -# github.com/hashicorp/golang-lru v0.5.1 +# github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru # github.com/imdario/mergo v0.3.9 @@ -96,7 +96,7 @@ github.com/spf13/pflag golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e +# golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -212,7 +212,7 @@ google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 -# gopkg.in/yaml.v2 v2.2.8 +# gopkg.in/yaml.v2 v2.3.0 gopkg.in/yaml.v2 # k8s.io/api v0.19.0-rc.2 => k8s.io/api v0.19.0-rc.2 k8s.io/api/admissionregistration/v1 @@ -494,6 +494,7 @@ k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest k8s.io/client-go/rest/fake k8s.io/client-go/rest/watch +k8s.io/client-go/restmapper k8s.io/client-go/testing k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache @@ -539,6 +540,9 @@ k8s.io/kubernetes/pkg/apis/core/helper k8s.io/utils/buffer k8s.io/utils/integer k8s.io/utils/trace +# sigs.k8s.io/controller-runtime v0.6.2 +sigs.k8s.io/controller-runtime/pkg/client +sigs.k8s.io/controller-runtime/pkg/client/apiutil # sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0-rc1 sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller/metrics diff --git a/vendor/sigs.k8s.io/controller-runtime/LICENSE b/vendor/sigs.k8s.io/controller-runtime/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go new file mode 100644 index 0000000000..9fe32b21f3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -0,0 +1,97 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package apiutil contains utilities for working with raw Kubernetes +// API machinery, such as creating RESTMappers and raw REST clients, +// and extracting the GVK of an object. +package apiutil + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery +// information fetched by a new client with the given config. +func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { + // Get a mapper + dc, err := discovery.NewDiscoveryClientForConfig(c) + if err != nil { + return nil, err + } + gr, err := restmapper.GetAPIGroupResources(dc) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(gr), nil +} + +// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. +func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + gvks, isUnversioned, err := scheme.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + if isUnversioned { + return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj) + } + + if len(gvks) < 1 { + return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj) + } + if len(gvks) > 1 { + // this should only trigger for things like metav1.XYZ -- + // normal versioned types should be fine + return schema.GroupVersionKind{}, fmt.Errorf( + "multiple group-version-kinds associated with type %T, refusing to guess at one", obj) + } + return gvks[0], nil +} + +// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated +// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from +// baseConfig, if set, otherwise a default serializer will be set. +func RESTClientForGVK(gvk schema.GroupVersionKind, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { + cfg := createRestConfig(gvk, baseConfig) + if cfg.NegotiatedSerializer == nil { + cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs} + } + return rest.RESTClientFor(cfg) +} + +//createRestConfig copies the base config and updates needed fields for a new rest config +func createRestConfig(gvk schema.GroupVersionKind, baseConfig *rest.Config) *rest.Config { + gv := gvk.GroupVersion() + + cfg := rest.CopyConfig(baseConfig) + cfg.GroupVersion = &gv + if gvk.Group == "" { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + if cfg.UserAgent == "" { + cfg.UserAgent = rest.DefaultKubernetesUserAgent() + } + return cfg +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go new file mode 100644 index 0000000000..5c34070e4b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -0,0 +1,323 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "errors" + "sync" + "time" + + "golang.org/x/time/rate" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// ErrRateLimited is returned by a RESTMapper method if the number of API +// calls has exceeded a limit within a certain time period. +type ErrRateLimited struct { + // Duration to wait until the next API call can be made. + Delay time.Duration +} + +func (e ErrRateLimited) Error() string { + return "too many API calls to the RESTMapper within a timeframe" +} + +// DelayIfRateLimited returns the delay time until the next API call is +// allowed and true if err is of type ErrRateLimited. The zero +// time.Duration value and false are returned if err is not a ErrRateLimited. +func DelayIfRateLimited(err error) (time.Duration, bool) { + var rlerr ErrRateLimited + if errors.As(err, &rlerr) { + return rlerr.Delay, true + } + return 0, false +} + +// dynamicRESTMapper is a RESTMapper that dynamically discovers resource +// types at runtime. +type dynamicRESTMapper struct { + mu sync.RWMutex // protects the following fields + staticMapper meta.RESTMapper + limiter *dynamicLimiter + newMapper func() (meta.RESTMapper, error) + + lazy bool + // Used for lazy init. + initOnce sync.Once +} + +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper +type DynamicRESTMapperOption func(*dynamicRESTMapper) error + +// WithLimiter sets the RESTMapper's underlying limiter to lim. +func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.limiter = &dynamicLimiter{lim} + return nil + } +} + +// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings +// until an API call is made. +var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { + drm.lazy = true + return nil +} + +// WithCustomMapper supports setting a custom RESTMapper refresher instead of +// the default method, which uses a discovery client. +// +// This exists mainly for testing, but can be useful if you need tighter control +// over how discovery is performed, which discovery endpoints are queried, etc. +func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.newMapper = newMapper + return nil + } +} + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. opts +// configure the RESTMapper. 
+func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { + client, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + return nil, err + } + drm := &dynamicRESTMapper{ + limiter: &dynamicLimiter{ + rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), + }, + newMapper: func() (meta.RESTMapper, error) { + groupResources, err := restmapper.GetAPIGroupResources(client) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(groupResources), nil + }, + } + for _, opt := range opts { + if err = opt(drm); err != nil { + return nil, err + } + } + if !drm.lazy { + if err := drm.setStaticMapper(); err != nil { + return nil, err + } + } + return drm, nil +} + +var ( + // defaultRefilRate is the default rate at which potential calls are + // added back to the "bucket" of allowed calls. + defaultRefillRate = 5 + // defaultLimitSize is the default starting/max number of potential calls + // per second. Once a call is used, it's added back to the bucket at a rate + // of defaultRefillRate per second. + defaultLimitSize = 5 +) + +// setStaticMapper sets drm's staticMapper by querying its client, regardless +// of reload backoff. +func (drm *dynamicRESTMapper) setStaticMapper() error { + newMapper, err := drm.newMapper() + if err != nil { + return err + } + drm.staticMapper = newMapper + return nil +} + +// init initializes drm only once if drm is lazy. +func (drm *dynamicRESTMapper) init() (err error) { + drm.initOnce.Do(func() { + if drm.lazy { + err = drm.setStaticMapper() + } + }) + return err +} + +// checkAndReload attempts to call the given callback, which is assumed to be dependent +// on the data in the restmapper. +// +// If the callback returns a NoKindMatchError, it will attempt to reload +// the RESTMapper's data and re-call the callback once that's occurred. +// If the callback returns any other error, the function will return immediately regardless. +// +// It will take care +// ensuring that reloads are rate-limitted and that extraneous calls aren't made. +// It's thread-safe, and worries about thread-safety for the callback (so the callback does +// not need to attempt to lock the restmapper). +func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error { + // first, check the common path -- data is fresh enough + // (use an IIFE for the lock's defer) + err := func() error { + drm.mu.RLock() + defer drm.mu.RUnlock() + + return checkNeedsReload() + }() + + // NB(directxman12): `Is` and `As` have a confusing relationship -- + // `Is` is like `== or does this implement .Is`, whereas `As` says + // `can I type-assert into` + needsReload := errors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // if the data wasn't fresh, we'll need to try and update it, so grab the lock... + drm.mu.Lock() + defer drm.mu.Unlock() + + // ... and double-check that we didn't reload in the meantime + err = checkNeedsReload() + needsReload = errors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // we're still stale, so grab a rate-limit token if we can... + if err := drm.limiter.checkRate(); err != nil { + return err + } + + // ...reload... + if err := drm.setStaticMapper(); err != nil { + return err + } + + // ...and return the results of the closure regardless + return checkNeedsReload() +} + +// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. 
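For orientation, here is a minimal, self-contained sketch of how this dynamic RESTMapper is typically wired up and how a caller reacts to its rate limiting. The in-cluster config, the limit of two discovery refreshes per second, and the `storage.k8s.io`/`CSIStorageCapacity` group/kind are illustrative assumptions only, not taken from this patch:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func main() {
	// Assumption: the process runs inside a cluster; any other way of
	// building a *rest.Config works just as well.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	// Lazy discovery plus a small token bucket: at most two discovery
	// refreshes per second when lookups keep missing the cached data.
	mapper, err := apiutil.NewDynamicRESTMapper(cfg,
		apiutil.WithLazyDiscovery,
		apiutil.WithLimiter(rate.NewLimiter(rate.Limit(2), 2)),
	)
	if err != nil {
		panic(err)
	}

	// The group/kind is only an example; CSIStorageCapacity happens to be
	// the object type this patch ultimately cares about.
	gk := schema.GroupKind{Group: "storage.k8s.io", Kind: "CSIStorageCapacity"}
	mapping, err := mapper.RESTMapping(gk, "v1alpha1")
	if delay, ok := apiutil.DelayIfRateLimited(err); ok {
		// The limiter's budget is exhausted: back off and retry once.
		time.Sleep(delay)
		mapping, err = mapper.RESTMapping(gk, "v1alpha1")
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("resource:", mapping.Resource.Resource)
}
```

A lookup that misses the cached discovery data triggers at most one rate-limited refresh per call; once the token bucket is empty, `ErrRateLimited` reports how long the caller should wait before trying again.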
+ +func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionKind{}, err + } + var gvk schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvk, err = drm.staticMapper.KindFor(resource) + return err + }) + return gvk, err +} + +func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvks []schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvks, err = drm.staticMapper.KindsFor(resource) + return err + }) + return gvks, err +} + +func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionResource{}, err + } + + var gvr schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvr, err = drm.staticMapper.ResourceFor(input) + return err + }) + return gvr, err +} + +func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvrs []schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvrs, err = drm.staticMapper.ResourcesFor(input) + return err + }) + return gvrs, err +} + +func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mapping *meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mapping, err = drm.staticMapper.RESTMapping(gk, versions...) + return err + }) + return mapping, err +} + +func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mappings []*meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mappings, err = drm.staticMapper.RESTMappings(gk, versions...) + return err + }) + return mappings, err +} + +func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { + if err := drm.init(); err != nil { + return "", err + } + var singular string + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + singular, err = drm.staticMapper.ResourceSingularizer(resource) + return err + }) + return singular, err +} + +// dynamicLimiter holds a rate limiter used to throttle chatty RESTMapper users. +type dynamicLimiter struct { + *rate.Limiter +} + +// checkRate returns an ErrRateLimited if too many API calls have been made +// within the set limit. +func (b *dynamicLimiter) checkRate() error { + res := b.Reserve() + if res.Delay() == 0 { + return nil + } + res.Cancel() + return ErrRateLimited{res.Delay()} +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go new file mode 100644 index 0000000000..c1c4d5d691 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -0,0 +1,208 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// Options are creation options for a Client +type Options struct { + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper +} + +// New returns a new Client using the provided config and Options. +// The returned client reads *and* writes directly from the server +// (it doesn't use object caches). It understands how to work with +// normal types (both custom resources and aggregated/built-in resources), +// as well as unstructured types. +// +// In the case of normal types, the scheme will be used to look up the +// corresponding group, version, and kind for the given type. In the +// case of unstructured types, the group, version, and kind will be extracted +// from the corresponding fields on the object. +func New(config *rest.Config, options Options) (Client, error) { + if config == nil { + return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") + } + + // Init a scheme if none provided + if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + // Init a Mapper if none provided + if options.Mapper == nil { + var err error + options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + if err != nil { + return nil, err + } + } + + clientcache := &clientCache{ + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), + resourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + } + + c := &client{ + typedClient: typedClient{ + cache: clientcache, + paramCodec: runtime.NewParameterCodec(options.Scheme), + }, + unstructuredClient: unstructuredClient{ + cache: clientcache, + paramCodec: noConversionParamCodec{}, + }, + } + + return c, nil +} + +var _ Client = &client{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type client struct { + typedClient typedClient + unstructuredClient unstructuredClient +} + +// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. +// TODO(vincepri): Remove this function and its calls once controller-runtime dependencies are upgraded to 1.16? 
+func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) { + if gvk != schema.EmptyObjectKind.GroupVersionKind() { + if v, ok := obj.(schema.ObjectKind); ok { + v.SetGroupVersionKind(gvk) + } + } +} + +// Create implements client.Client +func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { + _, ok := obj.(*unstructured.Unstructured) + if ok { + return c.unstructuredClient.Create(ctx, obj, opts...) + } + return c.typedClient.Create(ctx, obj, opts...) +} + +// Update implements client.Client +func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + _, ok := obj.(*unstructured.Unstructured) + if ok { + return c.unstructuredClient.Update(ctx, obj, opts...) + } + return c.typedClient.Update(ctx, obj, opts...) +} + +// Delete implements client.Client +func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { + _, ok := obj.(*unstructured.Unstructured) + if ok { + return c.unstructuredClient.Delete(ctx, obj, opts...) + } + return c.typedClient.Delete(ctx, obj, opts...) +} + +// DeleteAllOf implements client.Client +func (c *client) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + _, ok := obj.(*unstructured.Unstructured) + if ok { + return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) + } + return c.typedClient.DeleteAllOf(ctx, obj, opts...) +} + +// Patch implements client.Client +func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + _, ok := obj.(*unstructured.Unstructured) + if ok { + return c.unstructuredClient.Patch(ctx, obj, patch, opts...) + } + return c.typedClient.Patch(ctx, obj, patch, opts...) +} + +// Get implements client.Client +func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { + _, ok := obj.(*unstructured.Unstructured) + if ok { + return c.unstructuredClient.Get(ctx, key, obj) + } + return c.typedClient.Get(ctx, key, obj) +} + +// List implements client.Client +func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { + _, ok := obj.(*unstructured.UnstructuredList) + if ok { + return c.unstructuredClient.List(ctx, obj, opts...) + } + return c.typedClient.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient +func (c *client) Status() StatusWriter { + return &statusWriter{client: c} +} + +// statusWriter is client.StatusWriter that writes status subresource +type statusWriter struct { + client *client +} + +// ensure statusWriter implements client.StatusWriter +var _ StatusWriter = &statusWriter{} + +// Update implements client.StatusWriter +func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + _, ok := obj.(*unstructured.Unstructured) + if ok { + return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) + } + return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) 
+} + +// Patch implements client.Client +func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + _, ok := obj.(*unstructured.Unstructured) + if ok { + return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) + } + return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go new file mode 100644 index 0000000000..7741ac3c7e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// clientCache creates and caches rest clients and metadata for Kubernetes types +type clientCache struct { + // config is the rest.Config to talk to an apiserver + config *rest.Config + + // scheme maps go structs to GroupVersionKinds + scheme *runtime.Scheme + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // codecs are used to create a REST client for a gvk + codecs serializer.CodecFactory + + // resourceByType caches type metadata + resourceByType map[schema.GroupVersionKind]*resourceMeta + mu sync.RWMutex +} + +// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. +// If the object is a list, the resource represents the item's type instead. +func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList bool) (*resourceMeta, error) { + if strings.HasSuffix(gvk.Kind, "List") && isList { + // if this was a list, treat it as a request for the item's resource + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + client, err := apiutil.RESTClientForGVK(gvk, c.config, c.codecs) + if err != nil { + return nil, err + } + mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil +} + +// getResource returns the resource meta information for the given type of object. +// If the object is a list, the resource represents the item's type instead. 
+func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, err + } + + // It's better to do creation work twice than to not let multiple + // people make requests at once + c.mu.RLock() + r, known := c.resourceByType[gvk] + c.mu.RUnlock() + + if known { + return r, nil + } + + // Initialize a new Client + c.mu.Lock() + defer c.mu.Unlock() + r, err = c.newResource(gvk, meta.IsListType(obj)) + if err != nil { + return nil, err + } + c.resourceByType[gvk] = r + return r, err +} + +// getObjMeta returns objMeta containing both type and object metadata and state +func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { + r, err := c.getResource(obj) + if err != nil { + return nil, err + } + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + return &objMeta{resourceMeta: r, Object: m}, err +} + +// resourceMeta caches state for a Kubernetes type. +type resourceMeta struct { + // client is the rest client used to talk to the apiserver + rest.Interface + // gvk is the GroupVersionKind of the resourceMeta + gvk schema.GroupVersionKind + // mapping is the rest mapping + mapping *meta.RESTMapping +} + +// isNamespaced returns true if the type is namespaced +func (r *resourceMeta) isNamespaced() bool { + return r.mapping.Scope.Name() != meta.RESTScopeNameRoot + +} + +// resource returns the resource name of the type +func (r *resourceMeta) resource() string { + return r.mapping.Resource.Resource +} + +// objMeta stores type and object information about a Kubernetes type +type objMeta struct { + // resourceMeta contains type information for the object + *resourceMeta + + // Object contains meta data for the object instance + metav1.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go new file mode 100644 index 0000000000..48a8af42a6 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go @@ -0,0 +1,24 @@ +package client + +import ( + "errors" + "net/url" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.ParameterCodec = noConversionParamCodec{} + +// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings. +// it's useful in scenarios with the unstructured client and arbitrary resouces. +type noConversionParamCodec struct{} + +func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) { + return queryparams.Convert(obj) +} + +func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error { + return errors.New("DecodeParameters not implemented on noConversionParamCodec") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go new file mode 100644 index 0000000000..2965e5fa94 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package client contains functionality for interacting with Kubernetes API +// servers. +// +// Clients +// +// Clients are split into two interfaces -- Readers and Writers. Readers +// get and list, while writers create, update, and delete. +// +// The New function can be used to create a new client that talks directly +// to the API server. +// +// A common pattern in Kubernetes to read from a cache and write to the API +// server. This pattern is covered by the DelegatingClient type, which can +// be used to have a client whose Reader is different from the Writer. +// +// Options +// +// Many client operations in Kubernetes support options. These options are +// represented as variadic arguments at the end of a given method call. +// For instance, to use a label selector on list, you can call +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) +// +// Indexing +// +// Indexes may be added to caches using a FieldIndexer. This allows you to easily +// and efficiently look up objects with certain properties. You can then make +// use of the index by specifying a field selector on calls to List on the Reader +// corresponding to the given Cache. +// +// For instance, a Secret controller might have an index on the +// `.spec.volumes.secret.secretName` field in Pod objects, so that it could +// easily look up all pods that reference a given secret. +package client diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go new file mode 100644 index 0000000000..ced0548b1a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -0,0 +1,95 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDryRunClient wraps an existing client and enforces DryRun mode +// on all mutating api calls. +func NewDryRunClient(c Client) Client { + return &dryRunClient{client: c} +} + +var _ Client = &dryRunClient{} + +// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode. +type dryRunClient struct { + client Client +} + +// Create implements client.Client +func (c *dryRunClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append(opts, DryRunAll)...) +} + +// Update implements client.Client +func (c *dryRunClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append(opts, DryRunAll)...) 
+} + +// Delete implements client.Client +func (c *dryRunClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) +} + +// DeleteAllOf implements client.Client +func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.Client +func (c *dryRunClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} + +// Get implements client.Client +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { + return c.client.Get(ctx, key, obj) +} + +// List implements client.Client +func (c *dryRunClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { + return c.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient +func (c *dryRunClient) Status() StatusWriter { + return &dryRunStatusWriter{client: c.client.Status()} +} + +// ensure dryRunStatusWriter implements client.StatusWriter +var _ StatusWriter = &dryRunStatusWriter{} + +// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode +// enforced. +type dryRunStatusWriter struct { + client StatusWriter +} + +// Update implements client.StatusWriter +func (sw *dryRunStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.StatusWriter +func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go new file mode 100644 index 0000000000..9c96947f81 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -0,0 +1,135 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// ObjectKey identifies a Kubernetes Object. +type ObjectKey = types.NamespacedName + +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object +func ObjectKeyFromObject(obj runtime.Object) (ObjectKey, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return ObjectKey{}, err + } + return ObjectKey{Namespace: accessor.GetNamespace(), Name: accessor.GetName()}, nil +} + +// Patch is a patch that can be applied to a Kubernetes object. +type Patch interface { + // Type is the PatchType of the patch. 
+ Type() types.PatchType + // Data is the raw data representing the patch. + Data(obj runtime.Object) ([]byte, error) +} + +// TODO(directxman12): is there a sane way to deal with get/delete options? + +// Reader knows how to read and list Kubernetes objects. +type Reader interface { + // Get retrieves an obj for the given object key from the Kubernetes Cluster. + // obj must be a struct pointer so that obj can be updated with the response + // returned by the Server. + Get(ctx context.Context, key ObjectKey, obj runtime.Object) error + + // List retrieves list of objects for a given namespace and list options. On a + // successful call, Items field in the list will be populated with the + // result returned from the server. + List(ctx context.Context, list runtime.Object, opts ...ListOption) error +} + +// Writer knows how to create, delete, and update Kubernetes objects. +type Writer interface { + // Create saves the object obj in the Kubernetes cluster. + Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error + + // Delete deletes the given obj from Kubernetes cluster. + Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error + + // Update updates the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error + + // Patch patches the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error + + // DeleteAllOf deletes all objects of the given type matching the given options. + DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error +} + +// StatusClient knows how to create a client which can update status subresource +// for kubernetes objects. +type StatusClient interface { + Status() StatusWriter +} + +// StatusWriter knows how to update status subresource of a Kubernetes object. +type StatusWriter interface { + // Update updates the fields corresponding to the status subresource for the + // given obj. obj must be a struct pointer so that obj can be updated + // with the content returned by the Server. + Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error + + // Patch patches the given object's subresource. obj must be a struct + // pointer so that obj can be updated with the content returned by the + // Server. + Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error +} + +// Client knows how to perform CRUD operations on Kubernetes objects. +type Client interface { + Reader + Writer + StatusClient +} + +// IndexerFunc knows how to take an object and turn it into a series +// of non-namespaced keys. Namespaced objects are automatically given +// namespaced and non-spaced variants, so keys do not need to include namespace. +type IndexerFunc func(runtime.Object) []string + +// FieldIndexer knows how to index over a particular "field" such that it +// can later be used by a field selector. +type FieldIndexer interface { + // IndexFields adds an index with the given field name on the given object type + // by using the given function to extract the value for that field. If you want + // compatibility with the Kubernetes API server, only return one key, and only use + // fields that the API server supports. 
Otherwise, you can return multiple keys, + // and "equality" in the field selector means that at least one key matches the value. + // The FieldIndexer will automatically take care of indexing over namespace + // and supporting efficient all-namespace queries. + IndexField(ctx context.Context, obj runtime.Object, field string, extractValue IndexerFunc) error +} + +// IgnoreNotFound returns nil on NotFound errors. +// All other values that are not NotFound errors or nil are returned unmodified. +func IgnoreNotFound(err error) error { + if apierrors.IsNotFound(err) { + return nil + } + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go new file mode 100644 index 0000000000..131bdc2a04 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -0,0 +1,720 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" +) + +// {{{ "Functional" Option Interfaces + +// CreateOption is some configuration that modifies options for a create request. +type CreateOption interface { + // ApplyToCreate applies this configuration to the given create options. + ApplyToCreate(*CreateOptions) +} + +// DeleteOption is some configuration that modifies options for a delete request. +type DeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*DeleteOptions) +} + +// ListOption is some configuration that modifies options for a list request. +type ListOption interface { + // ApplyToList applies this configuration to the given list options. + ApplyToList(*ListOptions) +} + +// UpdateOption is some configuration that modifies options for a update request. +type UpdateOption interface { + // ApplyToUpdate applies this configuration to the given update options. + ApplyToUpdate(*UpdateOptions) +} + +// PatchOption is some configuration that modifies options for a patch request. +type PatchOption interface { + // ApplyToPatch applies this configuration to the given patch options. + ApplyToPatch(*PatchOptions) +} + +// DeleteAllOfOption is some configuration that modifies options for a delete request. +type DeleteAllOfOption interface { + // ApplyToDeleteAllOf applies this configuration to the given deletecollection options. + ApplyToDeleteAllOf(*DeleteAllOfOptions) +} + +// }}} + +// {{{ Multi-Type Options + +// DryRunAll sets the "dry run" option to "all", executing all +// validation, etc without persisting the change to storage. +var DryRunAll = dryRunAll{} + +type dryRunAll struct{} + +// ApplyToCreate applies this configuration to the given create options. +func (dryRunAll) ApplyToCreate(opts *CreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToUpdate applies this configuration to the given update options. 
+func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given patch options. +func (dryRunAll) ApplyToPatch(opts *PatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given delete options. +func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// FieldOwner set the field manager name for the given server-side apply patch. +type FieldOwner string + +// ApplyToPatch applies this configuration to the given patch options. +func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. +func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldManager = string(f) +} + +// }}} + +// {{{ Create Options + +// CreateOptions contains options for create requests. It's generally a subset +// of metav1.CreateOptions. +type CreateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw CreateOptions, as passed to the API server. + Raw *metav1.CreateOptions +} + +// AsCreateOptions returns these options as a metav1.CreateOptions. +// This may mutate the Raw field. +func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { + if o == nil { + return &metav1.CreateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.CreateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given create options on these options, +// and then returns itself (for convenient chaining). +func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { + for _, opt := range opts { + opt.ApplyToCreate(o) + } + return o +} + +// ApplyToCreate implements CreateOption +func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { + if o.DryRun != nil { + co.DryRun = o.DryRun + } + if o.FieldManager != "" { + co.FieldManager = o.FieldManager + } + if o.Raw != nil { + co.Raw = o.Raw + } +} + +var _ CreateOption = &CreateOptions{} + +// CreateDryRunAll sets the "dry run" option to "all". +// +// Deprecated: Use DryRunAll +var CreateDryRunAll = DryRunAll + +// }}} + +// {{{ Delete Options + +// DeleteOptions contains options for delete requests. It's generally a subset +// of metav1.DeleteOptions. +type DeleteOptions struct { + // GracePeriodSeconds is the duration in seconds before the object should be + // deleted. Value must be non-negative integer. The value zero indicates + // delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + GracePeriodSeconds *int64 + + // Preconditions must be fulfilled before a deletion is carried out. 
If not + // possible, a 409 Conflict status will be returned. + Preconditions *metav1.Preconditions + + // PropagationPolicy determined whether and how garbage collection will be + // performed. Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + PropagationPolicy *metav1.DeletionPropagation + + // Raw represents raw DeleteOptions, as passed to the API server. + Raw *metav1.DeleteOptions + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string +} + +// AsDeleteOptions returns these options as a metav1.DeleteOptions. +// This may mutate the Raw field. +func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { + if o == nil { + return &metav1.DeleteOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.DeleteOptions{} + } + + o.Raw.GracePeriodSeconds = o.GracePeriodSeconds + o.Raw.Preconditions = o.Preconditions + o.Raw.PropagationPolicy = o.PropagationPolicy + o.Raw.DryRun = o.DryRun + return o.Raw +} + +// ApplyOptions applies the given delete options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { + for _, opt := range opts { + opt.ApplyToDelete(o) + } + return o +} + +var _ DeleteOption = &DeleteOptions{} + +// ApplyToDelete implements DeleteOption +func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { + if o.GracePeriodSeconds != nil { + do.GracePeriodSeconds = o.GracePeriodSeconds + } + if o.Preconditions != nil { + do.Preconditions = o.Preconditions + } + if o.PropagationPolicy != nil { + do.PropagationPolicy = o.PropagationPolicy + } + if o.Raw != nil { + do.Raw = o.Raw + } + if o.DryRun != nil { + do.DryRun = o.DryRun + } +} + +// GracePeriodSeconds sets the grace period for the deletion +// to the given number of seconds. +type GracePeriodSeconds int64 + +// ApplyToDelete applies this configuration to the given delete options. +func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { + secs := int64(s) + opts.GracePeriodSeconds = &secs +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + s.ApplyToDelete(&opts.DeleteOptions) +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions metav1.Preconditions + +// ApplyToDelete applies this configuration to the given delete options. +func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { + preconds := metav1.Preconditions(p) + opts.Preconditions = &preconds +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// PropagationPolicy determined whether and how garbage collection will be +// performed. Either this field or OrphanDependents may be set, but not both. 
+// The default policy is decided by the existing finalizer set in the +// metadata.finalizers and the resource-specific default policy. +// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - +// allow the garbage collector to delete the dependents in the background; +// 'Foreground' - a cascading policy that deletes all dependents in the +// foreground. +type PropagationPolicy metav1.DeletionPropagation + +// ApplyToDelete applies the given delete options on these options. +// It will propagate to the dependents of the object to let the garbage collector handle it. +func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { + policy := metav1.DeletionPropagation(p) + opts.PropagationPolicy = &policy +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// }}} + +// {{{ List Options + +// ListOptions contains options for limiting or filtering results. +// It's generally a subset of metav1.ListOptions, with support for +// pre-parsed selectors (since generally, selectors will be executed +// against the cache). +type ListOptions struct { + // LabelSelector filters results by label. Use SetLabelSelector to + // set from raw string form. + LabelSelector labels.Selector + // FieldSelector filters results by a particular field. In order + // to use this with cache-based implementations, restrict usage to + // a single field-value pair that's been added to the indexers. + FieldSelector fields.Selector + + // Namespace represents the namespace to list for, or empty for + // non-namespaced objects, or to list across all namespaces. + Namespace string + + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. This field is not supported if watch + // is true in the Raw ListOptions. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. This field is not supported if watch is true in the Raw ListOptions. + Continue string + + // Raw represents raw ListOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface, + // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. + Raw *metav1.ListOptions +} + +var _ ListOption = &ListOptions{} + +// ApplyToList implements ListOption for ListOptions +func (o *ListOptions) ApplyToList(lo *ListOptions) { + if o.LabelSelector != nil { + lo.LabelSelector = o.LabelSelector + } + if o.FieldSelector != nil { + lo.FieldSelector = o.FieldSelector + } + if o.Namespace != "" { + lo.Namespace = o.Namespace + } + if o.Raw != nil { + lo.Raw = o.Raw + } + if o.Limit > 0 { + lo.Limit = o.Limit + } + if o.Continue != "" { + lo.Continue = o.Continue + } +} + +// AsListOptions returns these options as a flattened metav1.ListOptions. +// This may mutate the Raw field. 
+func (o *ListOptions) AsListOptions() *metav1.ListOptions { + if o == nil { + return &metav1.ListOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.ListOptions{} + } + if o.LabelSelector != nil { + o.Raw.LabelSelector = o.LabelSelector.String() + } + if o.FieldSelector != nil { + o.Raw.FieldSelector = o.FieldSelector.String() + } + if !o.Raw.Watch { + o.Raw.Limit = o.Limit + o.Raw.Continue = o.Continue + } + return o.Raw +} + +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). +func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { + for _, opt := range opts { + opt.ApplyToList(o) + } + return o +} + +// MatchingLabels filters the list/delete operation on the given set of labels. +type MatchingLabels map[string]string + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabels) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid reserializing this over and over? + sel := labels.SelectorFromValidatedSet(map[string]string(m)) + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// HasLabels filters the list/delete operation checking if the set of labels exists +// without checking their values. +type HasLabels []string + +// ApplyToList applies this configuration to the given list options. +func (m HasLabels) ApplyToList(opts *ListOptions) { + sel := labels.NewSelector() + for _, label := range m { + r, err := labels.NewRequirement(label, selection.Exists, nil) + if err == nil { + sel = sel.Add(*r) + } + } + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingLabelsSelector filters the list/delete operation on the given label +// selector (or index in the case of cached lists). A struct is used because +// labels.Selector is an interface, which cannot be aliased. +type MatchingLabelsSelector struct { + labels.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) { + opts.LabelSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingField filters the list operation on the given field selector +// (or index in the case of cached lists). +// +// Deprecated: Use MatchingFields +func MatchingField(name, val string) MatchingFields { + return MatchingFields{name: val} +} + +// MatchingFields filters the list/delete operation on the given field Set +// (or index in the case of cached lists). +type MatchingFields fields.Set + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFields) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid re-serializing this? + sel := fields.Set(m).AsSelector() + opts.FieldSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. 
+func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFieldsSelector filters the list/delete operation on the given field +// selector (or index in the case of cached lists). A struct is used because +// fields.Selector is an interface, which cannot be aliased. +type MatchingFieldsSelector struct { + fields.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) { + opts.FieldSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// InNamespace restricts the list/delete operation to the given namespace. +type InNamespace string + +// ApplyToList applies this configuration to the given list options. +func (n InNamespace) ApplyToList(opts *ListOptions) { + opts.Namespace = string(n) +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + n.ApplyToList(&opts.ListOptions) +} + +// Limit specifies the maximum number of results to return from the server. +// Limit does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Limit int64 + +// ApplyToList applies this configuration to the given an list options. +func (l Limit) ApplyToList(opts *ListOptions) { + opts.Limit = int64(l) +} + +// Continue sets a continuation token to retrieve chunks of results when using limit. +// Continue does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Continue string + +// ApplyToList applies this configuration to the given an List options. +func (c Continue) ApplyToList(opts *ListOptions) { + opts.Continue = string(c) +} + +// }}} + +// {{{ Update Options + +// UpdateOptions contains options for create requests. It's generally a subset +// of metav1.UpdateOptions. +type UpdateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw UpdateOptions, as passed to the API server. + Raw *metav1.UpdateOptions +} + +// AsUpdateOptions returns these options as a metav1.UpdateOptions. +// This may mutate the Raw field. +func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { + if o == nil { + return &metav1.UpdateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.UpdateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given update options on these options, +// and then returns itself (for convenient chaining). 
+func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { + for _, opt := range opts { + opt.ApplyToUpdate(o) + } + return o +} + +var _ UpdateOption = &UpdateOptions{} + +// ApplyToUpdate implements UpdateOption +func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { + if o.DryRun != nil { + uo.DryRun = o.DryRun + } + if o.FieldManager != "" { + uo.FieldManager = o.FieldManager + } + if o.Raw != nil { + uo.Raw = o.Raw + } +} + +// UpdateDryRunAll sets the "dry run" option to "all". +// +// Deprecated: Use DryRunAll +var UpdateDryRunAll = DryRunAll + +// }}} + +// {{{ Patch Options + +// PatchOptions contains options for patch requests. +type PatchOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw PatchOptions, as passed to the API server. + Raw *metav1.PatchOptions +} + +// ApplyOptions applies the given patch options on these options, +// and then returns itself (for convenient chaining). +func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions { + for _, opt := range opts { + opt.ApplyToPatch(o) + } + return o +} + +// AsPatchOptions returns these options as a metav1.PatchOptions. +// This may mutate the Raw field. +func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { + if o == nil { + return &metav1.PatchOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.PatchOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.Force = o.Force + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +var _ PatchOption = &PatchOptions{} + +// ApplyToPatch implements PatchOptions +func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { + if o.DryRun != nil { + po.DryRun = o.DryRun + } + if o.Force != nil { + po.Force = o.Force + } + if o.FieldManager != "" { + po.FieldManager = o.FieldManager + } + if o.Raw != nil { + po.Raw = o.Raw + } +} + +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} + +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + +// PatchDryRunAll sets the "dry run" option to "all". +// +// Deprecated: Use DryRunAll +var PatchDryRunAll = DryRunAll + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions +} + +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). 
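
The option helpers above are meant to be passed variadically to the client methods. A brief, illustrative sketch of composing them (not part of the vendored file; the client value `k8sClient`, the function name, and the core/v1 objects are assumptions for the example):

    package example

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // cleanupDemoPods lists a labelled group of pods and then deletes the whole
    // group in one call; DeleteAllOfOptions combines list options (which objects)
    // with delete options (how to delete them).
    func cleanupDemoPods(ctx context.Context, k8sClient client.Client) error {
    	var pods corev1.PodList
    	if err := k8sClient.List(ctx, &pods,
    		client.InNamespace("default"),
    		client.MatchingLabels{"app": "demo"},
    		client.Limit(10),
    	); err != nil {
    		return err
    	}

    	return k8sClient.DeleteAllOf(ctx, &corev1.Pod{},
    		client.InNamespace("default"),
    		client.MatchingLabels{"app": "demo"},
    		client.GracePeriodSeconds(30),
    		client.PropagationPolicy(metav1.DeletePropagationBackground),
    	)
    }
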
+func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + return o +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption +func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + o.ApplyToList(&do.ListOptions) + o.ApplyToDelete(&do.DeleteOptions) +} + +// }}} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go new file mode 100644 index 0000000000..22a093cab0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -0,0 +1,193 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +var ( + // Apply uses server-side apply to patch the given object. + Apply = applyPatch{} + + // Merge uses the raw object as a merge patch, without modifications. + // Use MergeFrom if you wish to compute a diff instead. + Merge = mergePatch{} +) + +type patch struct { + patchType types.PatchType + data []byte +} + +// Type implements Patch. +func (s *patch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *patch) Data(obj runtime.Object) ([]byte, error) { + return s.data, nil +} + +// RawPatch constructs a new Patch with the given PatchType and data. +func RawPatch(patchType types.PatchType, data []byte) Patch { + return &patch{patchType, data} +} + +// ConstantPatch constructs a new Patch with the given PatchType and data. +// +// Deprecated: use RawPatch instead +func ConstantPatch(patchType types.PatchType, data []byte) Patch { + return RawPatch(patchType, data) +} + +// MergeFromWithOptimisticLock can be used if clients want to make sure a patch +// is being applied to the latest resource version of an object. +// +// The behavior is similar to what an Update would do, without the need to send the +// whole object. Usually this method is useful if you might have multiple clients +// acting on the same object and the same API version, but with different versions of the Go structs. +// +// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C. +// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not. +type MergeFromWithOptimisticLock struct{} + +// ApplyToMergeFrom applies this configuration to the given patch options. +func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) { + in.OptimisticLock = true +} + +// MergeFromOption is some configuration that modifies options for a merge-from patch data. 
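
MergeFromWithOptimisticLock above, together with MergeFrom/MergeFromWithOptions defined below, is typically combined with Client.Patch. A minimal sketch under the same assumptions (`k8sClient`, `pod`, and the function name are placeholders, not taken from this patch):

    package example

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // addLabel sends only the diff against the snapshot taken by MergeFrom,
    // instead of sending the whole object as an Update would.
    func addLabel(ctx context.Context, k8sClient client.Client, pod *corev1.Pod) error {
    	base := pod.DeepCopy()
    	if pod.Labels == nil {
    		pod.Labels = map[string]string{}
    	}
    	pod.Labels["example"] = "true"

    	// The optimistic-lock option folds metadata.resourceVersion into the
    	// patch, so the request fails with a conflict if the object changed
    	// on the server in the meantime.
    	patch := client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{})
    	return k8sClient.Patch(ctx, pod, patch)
    }
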
+type MergeFromOption interface { + // ApplyToMergeFrom applies this configuration to the given patch options. + ApplyToMergeFrom(*MergeFromOptions) +} + +// MergeFromOptions contains options to generate a merge-from patch data. +type MergeFromOptions struct { + // OptimisticLock, when true, includes `metadata.resourceVersion` into the final + // patch data. If the `resourceVersion` field doesn't match what's stored, + // the operation results in a conflict and clients will need to try again. + OptimisticLock bool +} + +type mergeFromPatch struct { + from runtime.Object + opts MergeFromOptions +} + +// Type implements patch. +func (s *mergeFromPatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (s *mergeFromPatch) Data(obj runtime.Object) ([]byte, error) { + originalJSON, err := json.Marshal(s.from) + if err != nil { + return nil, err + } + + modifiedJSON, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + data, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) + if err != nil { + return nil, err + } + + if s.opts.OptimisticLock { + dataMap := map[string]interface{}{} + if err := json.Unmarshal(data, &dataMap); err != nil { + return nil, err + } + fromMeta, ok := s.from.(metav1.Object) + if !ok { + return nil, fmt.Errorf("cannot use OptimisticLock, from object %q is not a valid metav1.Object", s.from) + } + resourceVersion := fromMeta.GetResourceVersion() + if len(resourceVersion) == 0 { + return nil, fmt.Errorf("cannot use OptimisticLock, from object %q does not have any resource version we can use", s.from) + } + u := &unstructured.Unstructured{Object: dataMap} + u.SetResourceVersion(resourceVersion) + data, err = json.Marshal(u) + if err != nil { + return nil, err + } + } + + return data, nil +} + +// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. +func MergeFrom(obj runtime.Object) Patch { + return &mergeFromPatch{from: obj} +} + +// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base. +func MergeFromWithOptions(obj runtime.Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{from: obj, opts: *options} +} + +// mergePatch uses a raw merge strategy to patch the object. +type mergePatch struct{} + +// Type implements Patch. +func (p mergePatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (p mergePatch) Data(obj runtime.Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} + +// applyPatch uses server-side apply to patch the object. +type applyPatch struct{} + +// Type implements Patch. +func (p applyPatch) Type() types.PatchType { + return types.ApplyPatchType +} + +// Data implements Patch. +func (p applyPatch) Data(obj runtime.Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). 
+ return json.Marshal(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go new file mode 100644 index 0000000000..47cba9576d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +// DelegatingClient forms a Client by composing separate reader, writer and +// statusclient interfaces. This way, you can have an Client that reads from a +// cache and writes to the API server. +type DelegatingClient struct { + Reader + Writer + StatusClient +} + +// DelegatingReader forms a Reader that will cause Get and List requests for +// unstructured types to use the ClientReader while requests for any other type +// of object with use the CacheReader. This avoids accidentally caching the +// entire cluster in the common case of loading arbitrary unstructured objects +// (e.g. from OwnerReferences). +type DelegatingReader struct { + CacheReader Reader + ClientReader Reader +} + +// Get retrieves an obj for a given object key from the Kubernetes Cluster. +func (d *DelegatingReader) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { + _, isUnstructured := obj.(*unstructured.Unstructured) + if isUnstructured { + return d.ClientReader.Get(ctx, key, obj) + } + return d.CacheReader.Get(ctx, key, obj) +} + +// List retrieves list of objects for a given namespace and list options. +func (d *DelegatingReader) List(ctx context.Context, list runtime.Object, opts ...ListOption) error { + _, isUnstructured := list.(*unstructured.UnstructuredList) + if isUnstructured { + return d.ClientReader.List(ctx, list, opts...) + } + return d.CacheReader.List(ctx, list, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go new file mode 100644 index 0000000000..d65f04fe9b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -0,0 +1,201 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +// client is a client.Client that reads and writes directly from/to an API server. 
It lazily initializes +// new clients at the time they are used, and caches the client. +type typedClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client +func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Update implements client.Client +func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := &UpdateOptions{} + updateOpts.ApplyOptions(opts) + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Delete implements client.Client +func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client +func (c *typedClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client +func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +// Get implements client.Client +func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + return r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Name(key.Name).Do(ctx).Into(obj) +} + +// List implements client.Client +func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), c.paramCodec). + Do(ctx). 
+ Into(obj) +} + +// UpdateStatus used by StatusWriter to write status. +func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + // TODO(droot): examine the returned error and check if it error needs to be + // wrapped to improve the UX ? + // It will be nice to receive an error saying the object doesn't implement + // status subresource and check CRD definition + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// PatchStatus used by StatusWriter to write status. +func (c *typedClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go new file mode 100644 index 0000000000..5613791b9f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -0,0 +1,273 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type unstructuredClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client +func (uc *unstructuredClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + result := o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). 
+ Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Update implements client.Client +func (uc *unstructuredClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) + result := o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Delete implements client.Client +func (uc *unstructuredClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client +func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client +func (uc *unstructuredClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +// Get implements client.Client +func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + result := r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Name(key.Name). + Do(ctx). 
+ Into(obj) + + u.SetGroupVersionKind(gvk) + + return result +} + +// List implements client.Client +func (uc *unstructuredClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { + u, ok := obj.(*unstructured.UnstructuredList) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + result := o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Do(ctx). + Into(u) + + u.SetGroupVersionKind(gvk) + return result +} From 2e25fe481c9bbaf5b2409eae84c4ac9a7caf5dc6 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 11 Aug 2020 13:40:33 +0200 Subject: [PATCH 3/8] capacity: configurable support for SCs with immediate binding The information is not needed when the consumer is the Kubernetes scheduler, but it may be useful to publish also information about storage classes with immediate binding, so it's configurable. --- README.md | 8 +++- cmd/csi-provisioner/csi-provisioner.go | 3 +- pkg/capacity/capacity.go | 10 ++++ pkg/capacity/capacity_test.go | 66 +++++++++++++++++++++++--- pkg/capacity/features.go | 6 +++ 5 files changed, 84 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 3047bf2150..2a536b1c4f 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,7 @@ See the [storage capacity section](#capacity-support) below for details. * `--capacity-poll-interval `: How long the external-provisioner waits before checking for storage capacity changes. Defaults to `1m`. -* `--enable-capacity `: Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. Currently supported: `--enable-capacity=central`. +* `--enable-capacity `: Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. 
Can be given more than once and/or with comma-separated values. Currently supported: --enable-capacity=central,immediate-binding. * `--capacity-ownerref-level `: The level indicates the number of objects that need to be traversed starting from the pod identified by the POD_NAME and POD_NAMESPACE environment variables to reach the owning object for CSIStorageCapacity objects: 0 for the pod itself, 1 for a StatefulSet, 2 for a Deployment, etc. Defaults to `1` (= StatefulSet). @@ -154,6 +154,12 @@ To enable this feature in a driver deployment: to detect changed capacity with `--capacity-poll-interval`. - Optional: configure how many worker threads are used in parallel with `--capacity-threads`. +- Optional: enable producing information also for storage classes that + use immediate volume binding with + `--enable-capacity=immediate-binding`. This is usually not needed + because such volumes are created by the driver without involving the + Kubernetes scheduler and thus the published information would just + be ignored. To determine how many different topology segments exist, external-provisioner uses the topology keys and labels that the CSI diff --git a/cmd/csi-provisioner/csi-provisioner.go b/cmd/csi-provisioner/csi-provisioner.go index 24b54ef22e..260046fc65 100644 --- a/cmd/csi-provisioner/csi-provisioner.go +++ b/cmd/csi-provisioner/csi-provisioner.go @@ -84,7 +84,7 @@ var ( capacityFeatures = func() *capacity.Features { capacity := &capacity.Features{} - flag.Var(capacity, "enable-capacity", "Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. Currently supported: --enable-capacity=central.") + flag.Var(capacity, "enable-capacity", "Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. Can be given more than once and/or with comma-separated values. Currently supported: --enable-capacity=central,immediate-binding.") return capacity }() capacityPollInterval = flag.Duration("capacity-poll-interval", time.Minute, "How long the external-provisioner waits before checking for storage capacity changes.") @@ -326,6 +326,7 @@ func main() { factory.Storage().V1().StorageClasses(), factoryForNamespace.Storage().V1alpha1().CSIStorageCapacities(), *capacityPollInterval, + (*capacityFeatures)[capacity.FeatureImmediateBinding], ) } diff --git a/pkg/capacity/capacity.go b/pkg/capacity/capacity.go index 030686a5f1..20306f3cc9 100644 --- a/pkg/capacity/capacity.go +++ b/pkg/capacity/capacity.go @@ -83,6 +83,7 @@ type Controller struct { scInformer storageinformersv1.StorageClassInformer cInformer storageinformersv1alpha1.CSIStorageCapacityInformer pollPeriod time.Duration + immediateBinding bool // capacities contains one entry for each object that is supposed // to exist. 
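
For orientation, a minimal sketch of how the flag value above ends up as the new immediateBinding argument below (illustrative only; it assumes Features is a flag.Value backed by a map keyed by Feature, which is how csi-provisioner.go registers and indexes it):

    package capacity

    import "flag"

    // exampleImmediateBinding shows the round trip from command line to the
    // boolean handed to NewCentralCapacityController.
    func exampleImmediateBinding() bool {
    	fs := flag.NewFlagSet("example", flag.ContinueOnError)
    	features := &Features{}
    	fs.Var(features, "enable-capacity", "capacity features to enable")

    	// Same effect as --enable-capacity=central,immediate-binding on the
    	// external-provisioner command line.
    	_ = fs.Parse([]string{"--enable-capacity=central,immediate-binding"})

    	return (*features)[FeatureImmediateBinding] // true
    }
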
@@ -122,6 +123,7 @@ func NewCentralCapacityController( scInformer storageinformersv1.StorageClassInformer, cInformer storageinformersv1alpha1.CSIStorageCapacityInformer, pollPeriod time.Duration, + immediateBinding bool, ) *Controller { c := &Controller{ csiController: csiController, @@ -134,6 +136,7 @@ func NewCentralCapacityController( scInformer: scInformer, cInformer: cInformer, pollPeriod: pollPeriod, + immediateBinding: immediateBinding, capacities: map[workItem]*storagev1alpha1.CSIStorageCapacity{}, } @@ -247,6 +250,9 @@ func (c *Controller) onTopologyChanges(added []*topology.Segment, removed []*top if sc.Provisioner != c.driverName { continue } + if !c.immediateBinding && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingImmediate { + return + } for _, segment := range added { c.addWorkItem(segment, sc) } @@ -264,6 +270,10 @@ func (c *Controller) onSCAddOrUpdate(sc *storagev1.StorageClass) { } klog.V(3).Infof("Capacity Controller: storage class %s was updated or added", sc.Name) + if !c.immediateBinding && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingImmediate { + klog.V(3).Infof("Capacity Controller: ignoring storage class %s because it uses immediate binding", sc.Name) + return + } segments := c.topologyInformer.List() c.capacitiesLock.Lock() diff --git a/pkg/capacity/capacity_test.go b/pkg/capacity/capacity_test.go index aa33ab8ddd..86a2423c43 100644 --- a/pkg/capacity/capacity_test.go +++ b/pkg/capacity/capacity_test.go @@ -87,6 +87,7 @@ var ( // several different changes at runtime correctly. func TestController(t *testing.T) { testcases := map[string]struct { + immediateBinding bool topology mockTopology storage mockCapacity initialSCs []testSC @@ -137,6 +138,50 @@ func TestController(t *testing.T) { }, }, }, + "ignore SC with immediate binding": { + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + immediateBinding: true, + }, + }, + }, + "support SC with immediate binding": { + immediateBinding: true, + topology: mockTopology{ + segments: []*topology.Segment{&layer0}, + }, + storage: mockCapacity{ + capacity: map[string]interface{}{ + // This matches layer0. + "foo": "1Gi", + }, + }, + initialSCs: []testSC{ + { + name: "other-sc", + driverName: driverName, + immediateBinding: true, + }, + }, + expectedCapacities: []testCapacity{ + { + segment: layer0, + storageClassName: "other-sc", + quantity: "1Gi", + }, + }, + }, "reuse one capacity object, no changes": { topology: mockTopology{ segments: []*topology.Segment{&layer0}, @@ -735,7 +780,7 @@ func TestController(t *testing.T) { clientSet := fakeclientset.NewSimpleClientset(objects...) 
clientSet.PrependReactor("create", "csistoragecapacities", createCSIStorageCapacityReactor()) clientSet.PrependReactor("update", "csistoragecapacities", updateCSIStorageCapacityReactor()) - c := fakeController(ctx, clientSet, &tc.storage, &tc.topology) + c := fakeController(ctx, clientSet, &tc.storage, &tc.topology, tc.immediateBinding) for _, testCapacity := range tc.initialCapacities { capacity := makeCapacity(testCapacity) _, err := clientSet.StorageV1alpha1().CSIStorageCapacities(ownerNamespace).Create(ctx, capacity, metav1.CreateOptions{}) @@ -892,7 +937,7 @@ func updateCSIStorageCapacityReactor() func(action ktesting.Action) (handled boo } } -func fakeController(ctx context.Context, client *fakeclientset.Clientset, storage CSICapacityClient, topologyInformer topology.Informer) *Controller { +func fakeController(ctx context.Context, client *fakeclientset.Clientset, storage CSICapacityClient, topologyInformer topology.Informer, immediateBinding bool) *Controller { utilruntime.ReallyCrash = false // avoids os.Exit after "close of closed channel" in shared informer code // We don't need resyncs, they just lead to confusing log output if they get triggered while already some @@ -915,6 +960,7 @@ func fakeController(ctx context.Context, client *fakeclientset.Clientset, storag scInformer, cInformer, 1000*time.Hour, // Not used, but even if it was, we wouldn't want automatic capacity polling while the test runs... + immediateBinding, ) // This ensures that the informers are running and up-to-date. @@ -1098,18 +1144,24 @@ func makeCapacity(in testCapacity) *storagev1alpha1.CSIStorageCapacity { } type testSC struct { - name string - driverName string - parameters map[string]string + name string + driverName string + parameters map[string]string + immediateBinding bool } func makeSC(in testSC) *storagev1.StorageClass { + volumeBinding := storagev1.VolumeBindingWaitForFirstConsumer + if in.immediateBinding { + volumeBinding = storagev1.VolumeBindingImmediate + } return &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: in.name, }, - Provisioner: in.driverName, - Parameters: in.parameters, + Provisioner: in.driverName, + Parameters: in.parameters, + VolumeBindingMode: &volumeBinding, } } diff --git a/pkg/capacity/features.go b/pkg/capacity/features.go index 4835ad6d9f..d7a92fd50c 100644 --- a/pkg/capacity/features.go +++ b/pkg/capacity/features.go @@ -39,6 +39,12 @@ const ( // FeatureLocal enables the mode where external-provisioner // is deployed on each node. Not implemented yet. FeatureLocal = Feature("local") + + // FeatureImmediateBinding enables the publishing of information + // for storage classes with immediate binding. Off by default + // because normally that information is not used as the Kubernetes + // scheduler lets the driver pick a topology segment. + FeatureImmediateBinding = Feature("immediate-binding") ) // Set enables the named features. 
Multiple features can be listed, separated by commas, From 075dba790ad597c0104c0267e2503ef8d1f5eb4d Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 13 Aug 2020 14:51:49 +0200 Subject: [PATCH 4/8] capacity: unit test for owner.Lookup --- pkg/owner/owner_test.go | 194 +++++++++ vendor/modules.txt | 2 + .../pkg/client/fake/client.go | 409 ++++++++++++++++++ .../controller-runtime/pkg/client/fake/doc.go | 33 ++ .../pkg/internal/objectutil/filter.go | 42 ++ 5 files changed, 680 insertions(+) create mode 100644 pkg/owner/owner_test.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go diff --git a/pkg/owner/owner_test.go b/pkg/owner/owner_test.go new file mode 100644 index 0000000000..4f3b0c3bb3 --- /dev/null +++ b/pkg/owner/owner_test.go @@ -0,0 +1,194 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package owner + +import ( + "fmt" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + testNamespace = "test-namespace" + otherNamespace = "other-namespace" + statefulsetGkv = schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "StatefulSet", + } + deploymentGkv = schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + } + replicasetGkv = schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "ReplicaSet", + } + podGkv = schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + } + + pod = makeObject(testNamespace, "foo", podGkv, nil) + statefulset = makeObject(testNamespace, "foo", statefulsetGkv, nil) + statefulsetPod = makeObject(testNamespace, "foo", podGkv, &statefulset) + deployment = makeObject(testNamespace, "foo", deploymentGkv, nil) + replicaset = makeObject(testNamespace, "foo", replicasetGkv, &deployment) + otherReplicaset = makeObject(testNamespace, "bar", replicasetGkv, &deployment) + yetAnotherReplicaset = makeObject(otherNamespace, "foo", replicasetGkv, &deployment) + deploymentsetPod = makeObject(testNamespace, "foo", podGkv, &replicaset) +) + +// TestNodeTopology checks that node labels are correctly transformed +// into topology segments. 
+func TestNodeTopology(t *testing.T) { + testcases := map[string]struct { + objects []runtime.Object + start unstructured.Unstructured + levels int + expectError bool + expectOwner unstructured.Unstructured + }{ + "empty": { + start: pod, + expectError: true, + }, + "pod-itself": { + objects: []runtime.Object{&pod}, + start: pod, + levels: 0, + expectOwner: pod, + }, + "no-parent": { + objects: []runtime.Object{&pod}, + start: pod, + levels: 1, + expectError: true, + }, + "parent": { + objects: []runtime.Object{&statefulsetPod}, + start: statefulsetPod, + levels: 1, + // The object doesn't have to exist. + expectOwner: statefulset, + }, + "missing-parent": { + objects: []runtime.Object{&deploymentsetPod}, + start: deploymentsetPod, + levels: 2, + expectError: true, + }, + "wrong-parent": { + objects: []runtime.Object{&deploymentsetPod, &otherReplicaset}, + start: deploymentsetPod, + levels: 2, + expectError: true, + }, + "another-wrong-parent": { + objects: []runtime.Object{&deploymentsetPod, &yetAnotherReplicaset}, + start: deploymentsetPod, + levels: 2, + expectError: true, + }, + "grandparent": { + objects: []runtime.Object{&deploymentsetPod, &replicaset}, + start: deploymentsetPod, + levels: 2, + // The object doesn't have to exist. + expectOwner: deployment, + }, + } + + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + c := fake.NewFakeClient(tc.objects...) + gkv := tc.start.GroupVersionKind() + ownerRef, err := lookupRecursive(c, + tc.start.GetNamespace(), + tc.start.GetName(), + gkv.Group, + gkv.Version, + gkv.Kind, + tc.levels) + if err != nil && !tc.expectError { + t.Fatalf("unexpected error: %v", err) + } + if err == nil && tc.expectError { + t.Fatal("unexpected success") + } + if err == nil { + if ownerRef == nil { + t.Fatal("unexpected nil owner") + } + gkv := tc.expectOwner.GroupVersionKind() + apiVersion := metav1.GroupVersion{Group: gkv.Group, Version: gkv.Version}.String() + if ownerRef.APIVersion != apiVersion { + t.Errorf("expected APIVersion %q, got %q", apiVersion, ownerRef.APIVersion) + } + if ownerRef.Kind != gkv.Kind { + t.Errorf("expected Kind %q, got %q", gkv.Kind, ownerRef.Kind) + } + if ownerRef.Name != tc.expectOwner.GetName() { + t.Errorf("expected Name %q, got %q", tc.expectOwner.GetName(), ownerRef.Name) + } + if ownerRef.UID != tc.expectOwner.GetUID() { + t.Errorf("expected UID %q, got %q", tc.expectOwner.GetUID(), ownerRef.UID) + } + if ownerRef.Controller == nil || !*ownerRef.Controller { + t.Error("Controller field should true") + } + if ownerRef.BlockOwnerDeletion != nil && *ownerRef.BlockOwnerDeletion { + t.Error("BlockOwnerDeletion field should false") + } + } + }) + } +} + +var uidCounter int + +func makeObject(namespace, name string, gkv schema.GroupVersionKind, owner *unstructured.Unstructured) unstructured.Unstructured { + u := unstructured.Unstructured{} + u.SetNamespace(namespace) + u.SetName(name) + u.SetGroupVersionKind(gkv) + uidCounter++ + u.SetUID(types.UID(fmt.Sprintf("FAKE-UID-%d", uidCounter))) + if owner != nil { + isTrue := true + u.SetOwnerReferences([]metav1.OwnerReference{ + { + APIVersion: owner.GetAPIVersion(), + Kind: owner.GetKind(), + Name: owner.GetName(), + UID: owner.GetUID(), + Controller: &isTrue, + }, + }) + } + return u +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 88d921be01..5c397bbda6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -543,6 +543,8 @@ k8s.io/utils/trace # sigs.k8s.io/controller-runtime v0.6.2 sigs.k8s.io/controller-runtime/pkg/client 
sigs.k8s.io/controller-runtime/pkg/client/apiutil +sigs.k8s.io/controller-runtime/pkg/client/fake +sigs.k8s.io/controller-runtime/pkg/internal/objectutil # sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0-rc1 sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller/metrics diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go new file mode 100644 index 0000000000..e7d3c5c35b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -0,0 +1,409 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/testing" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +type versionedTracker struct { + testing.ObjectTracker +} + +type fakeClient struct { + tracker versionedTracker + scheme *runtime.Scheme +} + +var _ client.Client = &fakeClient{} + +const ( + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +// NewFakeClient creates a new fake client for testing. +// You can choose to initialize it with a slice of runtime.Object. +// Deprecated: use NewFakeClientWithScheme. You should always be +// passing an explicit Scheme. +func NewFakeClient(initObjs ...runtime.Object) client.Client { + return NewFakeClientWithScheme(scheme.Scheme, initObjs...) +} + +// NewFakeClientWithScheme creates a new fake client with the given scheme +// for testing. +// You can choose to initialize it with a slice of runtime.Object. 
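
Since NewFakeClient above is deprecated in favour of passing an explicit scheme, a short illustrative sketch of using the scheme-based constructor whose definition follows (the seeded Pod, function name, and corev1 scheme registration are assumptions, not taken from this patch):

    package example

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    )

    // getSeededPod builds a fake client seeded with one Pod and reads it back.
    func getSeededPod(ctx context.Context) (*corev1.Pod, error) {
    	s := runtime.NewScheme()
    	if err := corev1.AddToScheme(s); err != nil {
    		return nil, err
    	}
    	seed := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "foo"}}

    	c := fake.NewFakeClientWithScheme(s, seed)

    	var pod corev1.Pod
    	if err := c.Get(ctx, client.ObjectKey{Namespace: "test", Name: "foo"}, &pod); err != nil {
    		return nil, err
    	}
    	return &pod, nil
    }
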
+func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.Client { + tracker := testing.NewObjectTracker(clientScheme, scheme.Codecs.UniversalDecoder()) + for _, obj := range initObjs { + err := tracker.Add(obj) + if err != nil { + panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err)) + } + } + return &fakeClient{ + tracker: versionedTracker{tracker}, + scheme: clientScheme, + } +} + +func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + if accessor.GetName() == "" { + return apierrors.NewInvalid( + obj.GetObjectKind().GroupVersionKind().GroupKind(), + accessor.GetName(), + field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) + } + if accessor.GetResourceVersion() != "" { + return apierrors.NewBadRequest("resourceVersion can not be set for Create requests") + } + accessor.SetResourceVersion("1") + if err := t.ObjectTracker.Create(gvr, obj, ns); err != nil { + accessor.SetResourceVersion("") + return err + } + return nil +} + +func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %v", err) + } + if accessor.GetName() == "" { + return apierrors.NewInvalid( + obj.GetObjectKind().GroupVersionKind().GroupKind(), + accessor.GetName(), + field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) + } + oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName()) + if err != nil { + return err + } + oldAccessor, err := meta.Accessor(oldObject) + if err != nil { + return err + } + if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() { + return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified")) + } + if oldAccessor.GetResourceVersion() == "" { + oldAccessor.SetResourceVersion("0") + } + intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64) + if err != nil { + return fmt.Errorf("can not convert resourceVersion %q to int: %v", oldAccessor.GetResourceVersion(), err) + } + intResourceVersion++ + accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10)) + return t.ObjectTracker.Update(gvr, obj, ns) +} + +func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + o, err := c.tracker.Get(gvr, key.Namespace, key.Name) + if err != nil { + return err + } + + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + j, err := json.Marshal(o) + if err != nil { + return err + } + decoder := scheme.Codecs.UniversalDecoder() + _, _, err = decoder.Decode(j, nil, obj) + return err +} + +func (c *fakeClient) List(ctx context.Context, obj runtime.Object, opts ...client.ListOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + OriginalKind := gvk.Kind + + if !strings.HasSuffix(gvk.Kind, "List") { + return fmt.Errorf("non-list type %T (kind %q) passed as output", obj, gvk) + } + // we need the non-list GVK, so chop off the "List" from the end of the kind + gvk.Kind = 
gvk.Kind[:len(gvk.Kind)-4] + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + o, err := c.tracker.List(gvr, gvk, listOpts.Namespace) + if err != nil { + return err + } + + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(OriginalKind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + j, err := json.Marshal(o) + if err != nil { + return err + } + decoder := scheme.Codecs.UniversalDecoder() + _, _, err = decoder.Decode(j, nil, obj) + if err != nil { + return err + } + + if listOpts.LabelSelector != nil { + objs, err := meta.ExtractList(obj) + if err != nil { + return err + } + filteredObjs, err := objectutil.FilterWithLabels(objs, listOpts.LabelSelector) + if err != nil { + return err + } + err = meta.SetList(obj, filteredObjs) + if err != nil { + return err + } + } + return nil +} + +func (c *fakeClient) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error { + createOptions := &client.CreateOptions{} + createOptions.ApplyOptions(opts) + + for _, dryRunOpt := range createOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + + if accessor.GetName() == "" && accessor.GetGenerateName() != "" { + base := accessor.GetGenerateName() + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength))) + } + + return c.tracker.Create(gvr, obj, accessor.GetNamespace()) +} + +func (c *fakeClient) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOption) error { + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + delOptions := client.DeleteOptions{} + delOptions.ApplyOptions(opts) + + //TODO: implement propagation + return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) +} + +func (c *fakeClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...client.DeleteAllOfOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + dcOptions := client.DeleteAllOfOptions{} + dcOptions.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace) + if err != nil { + return err + } + + objs, err := meta.ExtractList(o) + if err != nil { + return err + } + filteredObjs, err := objectutil.FilterWithLabels(objs, dcOptions.LabelSelector) + if err != nil { + return err + } + for _, o := range filteredObjs { + accessor, err := meta.Accessor(o) + if err != nil { + return err + } + err = c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) + if err != nil { + return err + } + } + return nil +} + +func (c *fakeClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error { + updateOptions := &client.UpdateOptions{} + updateOptions.ApplyOptions(opts) + + for _, dryRunOpt := range updateOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + return c.tracker.Update(gvr, obj, accessor.GetNamespace()) +} + +func (c *fakeClient) Patch(ctx context.Context, obj 
runtime.Object, patch client.Patch, opts ...client.PatchOption) error { + patchOptions := &client.PatchOptions{} + patchOptions.ApplyOptions(opts) + + for _, dryRunOpt := range patchOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + data, err := patch.Data(obj) + if err != nil { + return err + } + + reaction := testing.ObjectReaction(c.tracker) + handled, o, err := reaction(testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data)) + if err != nil { + return err + } + if !handled { + panic("tracker could not handle patch method") + } + + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + j, err := json.Marshal(o) + if err != nil { + return err + } + decoder := scheme.Codecs.UniversalDecoder() + _, _, err = decoder.Decode(j, nil, obj) + return err +} + +func (c *fakeClient) Status() client.StatusWriter { + return &fakeStatusWriter{client: c} +} + +func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return gvr, nil +} + +type fakeStatusWriter struct { + client *fakeClient +} + +func (sw *fakeStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error { + // TODO(droot): This results in full update of the obj (spec + status). Need + // a way to update status field only. + return sw.client.Update(ctx, obj, opts...) +} + +func (sw *fakeStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error { + // TODO(droot): This results in full update of the obj (spec + status). Need + // a way to update status field only. + return sw.client.Patch(ctx, obj, patch, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go new file mode 100644 index 0000000000..a45d703320 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package fake provides a fake client for testing. + +Deprecated: please use pkg/envtest for testing. This package will be dropped +before the v1.0.0 release. + +An fake client is backed by its simple object store indexed by GroupVersionResource. +You can create a fake client with optional objects. + + client := NewFakeClient(initObjs...) // initObjs is a slice of runtime.Object + +You can invoke the methods defined in the Client interface. 
+ +When it doubt, it's almost always better not to use this package and instead use +envtest.Environment with a real client and API server. +*/ +package fake diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go new file mode 100644 index 0000000000..8513846e2c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package objectutil + +import ( + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// FilterWithLabels returns a copy of the items in objs matching labelSel +func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { + outItems := make([]runtime.Object, 0, len(objs)) + for _, obj := range objs { + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outItems = append(outItems, obj.DeepCopyObject()) + } + return outItems, nil +} From 0a4f2b306367bf8ef0983c6a3c2d5d8f786ebfe7 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 13 Aug 2020 18:32:03 +0200 Subject: [PATCH 5/8] capacity: check casts of watch objects, handle DeletedFinalStateUnknown DeletedFinalStateUnknown is something that may be handed to the delete callback. It can and should be handled. Because there might be other, currently unexpected objects in the future, it's better to handle them gracefully with a checked cast and proper logging. --- pkg/capacity/capacity.go | 60 +++++++++++++++++++++++++++---- pkg/capacity/topology/nodes.go | 66 +++++++++++++++++++++++++++++----- 2 files changed, 111 insertions(+), 15 deletions(-) diff --git a/pkg/capacity/capacity.go b/pkg/capacity/capacity.go index 20306f3cc9..9cd5927528 100644 --- a/pkg/capacity/capacity.go +++ b/pkg/capacity/capacity.go @@ -143,9 +143,34 @@ func NewCentralCapacityController( // Now register for changes. Depending on the implementation of the informers, // this may already invoke callbacks. 
handler := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.onSCAddOrUpdate(obj.(*storagev1.StorageClass)) }, - UpdateFunc: func(_ interface{}, newObj interface{}) { c.onSCAddOrUpdate(newObj.(*storagev1.StorageClass)) }, - DeleteFunc: func(obj interface{}) { c.onSCDelete(obj.(*storagev1.StorageClass)) }, + AddFunc: func(obj interface{}) { + sc, ok := obj.(*storagev1.StorageClass) + if !ok { + klog.Errorf("added object: expected StorageClass, got %T -> ignoring it", obj) + return + } + c.onSCAddOrUpdate(sc) + }, + UpdateFunc: func(_ interface{}, newObj interface{}) { + sc, ok := newObj.(*storagev1.StorageClass) + if !ok { + klog.Errorf("updated object: expected StorageClass, got %T -> ignoring it", newObj) + return + } + c.onSCAddOrUpdate(sc) + }, + DeleteFunc: func(obj interface{}) { + // Beware of "xxx deleted" events + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + obj = unknown.Obj + } + sc, ok := obj.(*storagev1.StorageClass) + if !ok { + klog.Errorf("deleted object: expected StorageClass, got %T -> ignoring it", obj) + return + } + c.onSCDelete(sc) + }, } c.scInformer.Informer().AddEventHandler(handler) c.topologyInformer.AddCallback(c.onTopologyChanges) @@ -211,11 +236,34 @@ func (c *Controller) prepare(ctx context.Context) { // for all objects immediately when adding it. klog.V(3).Info("Checking for existing CSIStorageCapacity objects") handler := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.onCAddOrUpdate(ctx, obj.(*storagev1alpha1.CSIStorageCapacity)) }, + AddFunc: func(obj interface{}) { + csc, ok := obj.(*storagev1alpha1.CSIStorageCapacity) + if !ok { + klog.Errorf("added object: expected CSIStorageCapacity, got %T -> ignoring it", obj) + return + } + c.onCAddOrUpdate(ctx, csc) + }, UpdateFunc: func(_ interface{}, newObj interface{}) { - c.onCAddOrUpdate(ctx, newObj.(*storagev1alpha1.CSIStorageCapacity)) + csc, ok := newObj.(*storagev1alpha1.CSIStorageCapacity) + if !ok { + klog.Errorf("updated object: expected CSIStorageCapacity, got %T -> ignoring it", newObj) + return + } + c.onCAddOrUpdate(ctx, csc) + }, + DeleteFunc: func(obj interface{}) { + // Beware of "xxx deleted" events + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + obj = unknown.Obj + } + csc, ok := obj.(*storagev1alpha1.CSIStorageCapacity) + if !ok { + klog.Errorf("deleted object: expected CSIStorageCapacity, got %T -> ignoring it", obj) + return + } + c.onCDelete(ctx, csc) }, - DeleteFunc: func(obj interface{}) { c.onCDelete(ctx, obj.(*storagev1alpha1.CSIStorageCapacity)) }, } c.cInformer.Informer().AddEventHandler(handler) capacities, err := c.cInformer.Lister().List(labels.Everything()) diff --git a/pkg/capacity/topology/nodes.go b/pkg/capacity/topology/nodes.go index adeb9f7f52..bb986c1af7 100644 --- a/pkg/capacity/topology/nodes.go +++ b/pkg/capacity/topology/nodes.go @@ -61,40 +61,88 @@ func NewNodeTopology( // a bit and just remember that there is work to be done. 
nodeHandler := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - klog.V(5).Infof("capacity topology: new node: %s", obj.(*v1.Node).Name) + node, ok := obj.(*v1.Node) + if !ok { + klog.Errorf("added object: expected Node, got %T -> ignoring it", obj) + return + } + klog.V(5).Infof("capacity topology: new node: %s", node.Name) queue.Add("") }, UpdateFunc: func(oldObj interface{}, newObj interface{}) { - if reflect.DeepEqual(oldObj.(*v1.Node).Labels, newObj.(*v1.Node).Labels) { + oldNode, ok := oldObj.(*v1.Node) + if !ok { + klog.Errorf("original object: expected Node, got %T -> ignoring it", oldObj) + return + } + newNode, ok := newObj.(*v1.Node) + if !ok { + klog.Errorf("updated object: expected Node, got %T -> ignoring it", newObj) + return + } + if reflect.DeepEqual(oldNode.Labels, newNode.Labels) { // Shortcut: labels haven't changed, no need to sync. return } - klog.V(5).Infof("capacity topology: updated node: %s", newObj.(*v1.Node).Name) + klog.V(5).Infof("capacity topology: updated node: %s", newNode.Name) queue.Add("") }, DeleteFunc: func(obj interface{}) { - klog.V(5).Infof("capacity topology: removed node: %s", obj.(*v1.Node).Name) + // Beware of "xxx deleted" events + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + obj = unknown.Obj + } + node, ok := obj.(*v1.Node) + if !ok { + klog.Errorf("deleted object: expected Node, got %T -> ignoring it", obj) + return + } + klog.V(5).Infof("capacity topology: removed node: %s", node.Name) queue.Add("") }, } nodeInformer.Informer().AddEventHandler(nodeHandler) csiNodeHandler := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - klog.V(5).Infof("capacity topology: new CSINode: %s", obj.(*storagev1.CSINode).Name) + csiNode, ok := obj.(*storagev1.CSINode) + if !ok { + klog.Errorf("added object: expected CSINode, got %T -> ignoring it", obj) + return + } + klog.V(5).Infof("capacity topology: new CSINode: %s", csiNode.Name) queue.Add("") }, UpdateFunc: func(oldObj interface{}, newObj interface{}) { - oldKeys := nt.driverTopologyKeys(oldObj.(*storagev1.CSINode)) - newKeys := nt.driverTopologyKeys(newObj.(*storagev1.CSINode)) + oldCSINode, ok := oldObj.(*storagev1.CSINode) + if !ok { + klog.Errorf("original object: expected CSINode, got %T -> ignoring it", oldObj) + return + } + newCSINode, ok := newObj.(*storagev1.CSINode) + if !ok { + klog.Errorf("updated object: expected CSINode, got %T -> ignoring it", newObj) + return + } + oldKeys := nt.driverTopologyKeys(oldCSINode) + newKeys := nt.driverTopologyKeys(newCSINode) if reflect.DeepEqual(oldKeys, newKeys) { // Shortcut: keys haven't changed, no need to sync. 
return } - klog.V(5).Infof("capacity topology: updated CSINode: %s", newObj.(*storagev1.CSINode).Name) + klog.V(5).Infof("capacity topology: updated CSINode: %s", newCSINode.Name) queue.Add("") }, DeleteFunc: func(obj interface{}) { - klog.V(5).Infof("capacity topology: removed CSINode: %s", obj.(*storagev1.CSINode).Name) + // Beware of "xxx deleted" events + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + obj = unknown.Obj + } + csiNode, ok := obj.(*storagev1.CSINode) + if !ok { + klog.Errorf("deleted object: expected CSINode, got %T -> ignoring it", obj) + return + } + klog.V(5).Infof("capacity topology: removed CSINode: %s", csiNode.Name) queue.Add("") }, } From 98ee97011fe2b3e3a1a12a0d49109f80a12feeeb Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 14 Aug 2020 10:15:03 +0200 Subject: [PATCH 6/8] capacity: full YAML example --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2a536b1c4f..e207162cd6 100644 --- a/README.md +++ b/README.md @@ -128,7 +128,10 @@ information](https://kubernetes.io/docs/concepts/storage/storage-capacity] when selecting nodes for pods with unbound volumes that wait for the first consumer. -To enable this feature in a driver deployment: +To enable this feature in a driver deployment (see also the +[`deploy/kubernetes/storage-capacity.yaml`](deploy/kubernetes/storage-capacity.yaml) +example): + - Set the `POD_NAME` and `POD_NAMESPACE` environment variables like this: ```yaml env: From de7eba5ec54c3f576a3480b6199b118e09c57ddc Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 14 Aug 2020 20:27:41 +0200 Subject: [PATCH 7/8] capacity: document ownership design --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index e207162cd6..8f4275eef7 100644 --- a/README.md +++ b/README.md @@ -128,6 +128,15 @@ information](https://kubernetes.io/docs/concepts/storage/storage-capacity] when selecting nodes for pods with unbound volumes that wait for the first consumer. +Currently, all CSIStorageCapacity objects created by an instance of +the external-provisioner must have the same +[owner](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents). That +owner is how external-provisioner distinguishes between objects that +it must manage and those that it must leave alone. The owner is +determine with the `POD_NAME/POD_NAMESPACE` environment variables and +the `--capacity-ownerref-level` parameter. Other solutions will be +added in the future. + To enable this feature in a driver deployment (see also the [`deploy/kubernetes/storage-capacity.yaml`](deploy/kubernetes/storage-capacity.yaml) example): From e50daf34473dc838e1fec0c81eab00084703a5d0 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 17 Aug 2020 10:46:16 +0200 Subject: [PATCH 8/8] capacity: separate flags for mode and immediate binding `--enable-capacity` was originally designed to be a multi-value boolean map. After removing options from it, replacing it with a single-value string makes more sense. 
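In concrete terms, what was previously spelled `--enable-capacity=central,immediate-binding`
becomes `--capacity-controller-deployment-mode=central` plus, optionally,
`--capacity-for-immediate-binding`.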
--- README.md | 4 +- cmd/csi-provisioner/csi-provisioner.go | 17 +++--- pkg/capacity/features.go | 83 -------------------------- pkg/capacity/features_test.go | 81 ------------------------- pkg/capacity/mode.go | 63 +++++++++++++++++++ 5 files changed, 75 insertions(+), 173 deletions(-) delete mode 100644 pkg/capacity/features.go delete mode 100644 pkg/capacity/features_test.go create mode 100644 pkg/capacity/mode.go diff --git a/README.md b/README.md index 8f4275eef7..d320886034 100644 --- a/README.md +++ b/README.md @@ -78,10 +78,12 @@ See the [storage capacity section](#capacity-support) below for details. * `--capacity-poll-interval `: How long the external-provisioner waits before checking for storage capacity changes. Defaults to `1m`. -* `--enable-capacity `: Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. Can be given more than once and/or with comma-separated values. Currently supported: --enable-capacity=central,immediate-binding. +* `--capacity-controller-deployment-mode central|none`: Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. 'central' is currently the only supported mode. Use it when there is just one active provisioner in the cluster. Defaults to `none`. * `--capacity-ownerref-level `: The level indicates the number of objects that need to be traversed starting from the pod identified by the POD_NAME and POD_NAMESPACE environment variables to reach the owning object for CSIStorageCapacity objects: 0 for the pod itself, 1 for a StatefulSet, 2 for a Deployment, etc. Defaults to `1` (= StatefulSet). +* `--capacity-for-immediate-binding `: Enables producing capacity information for storage classes with immediate binding. Not needed for the Kubernetes scheduler, maybe useful for other consumers or for debugging. Defaults to `false`. + #### Other recognized arguments * `--feature-gates `: A set of comma separated `=` pairs that describe feature gates for alpha/experimental features. See [list of features](#feature-status) or `--help` output for list of recognized features. Example: `--feature-gates Topology=true` to enable Topology feature that's disabled by default. diff --git a/cmd/csi-provisioner/csi-provisioner.go b/cmd/csi-provisioner/csi-provisioner.go index 260046fc65..1f90bbd677 100644 --- a/cmd/csi-provisioner/csi-provisioner.go +++ b/cmd/csi-provisioner/csi-provisioner.go @@ -82,13 +82,14 @@ var ( kubeAPIQPS = flag.Float32("kube-api-qps", 5, "QPS to use while communicating with the kubernetes apiserver. Defaults to 5.0.") kubeAPIBurst = flag.Int("kube-api-burst", 10, "Burst to use while communicating with the kubernetes apiserver. Defaults to 10.") - capacityFeatures = func() *capacity.Features { - capacity := &capacity.Features{} - flag.Var(capacity, "enable-capacity", "Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. Can be given more than once and/or with comma-separated values. Currently supported: --enable-capacity=central,immediate-binding.") - return capacity + capacityMode = func() *capacity.DeploymentMode { + mode := capacity.DeploymentModeNone + flag.Var(&mode, "capacity-controller-deployment-mode", "Enables producing CSIStorageCapacity objects with capacity information from the driver's GetCapacity call. 'central' is currently the only supported mode. 
Use it when there is just one active provisioner in the cluster.") + return &mode }() - capacityPollInterval = flag.Duration("capacity-poll-interval", time.Minute, "How long the external-provisioner waits before checking for storage capacity changes.") - capacityOwnerrefLevel = flag.Int("capacity-ownerref-level", 1, "The level indicates the number of objects that need to be traversed starting from the pod identified by the POD_NAME and POD_NAMESPACE environment variables to reach the owning object for CSIStorageCapacity objects: 0 for the pod itself, 1 for a StatefulSet, 2 for a Deployment, etc.") + capacityImmediateBinding = flag.Bool("capacity-for-immediate-binding", false, "Enables producing capacity information for storage classes with immediate binding. Not needed for the Kubernetes scheduler, maybe useful for other consumers or for debugging.") + capacityPollInterval = flag.Duration("capacity-poll-interval", time.Minute, "How long the external-provisioner waits before checking for storage capacity changes.") + capacityOwnerrefLevel = flag.Int("capacity-ownerref-level", 1, "The level indicates the number of objects that need to be traversed starting from the pod identified by the POD_NAME and POD_NAMESPACE environment variables to reach the owning object for CSIStorageCapacity objects: 0 for the pod itself, 1 for a StatefulSet, 2 for a Deployment, etc.") featureGates map[string]bool provisionController *controller.ProvisionController @@ -282,7 +283,7 @@ func main() { ) var capacityController *capacity.Controller - if (*capacityFeatures)[capacity.FeatureCentral] { + if *capacityMode == capacity.DeploymentModeCentral { podName := os.Getenv("POD_NAME") namespace := os.Getenv("POD_NAMESPACE") if podName == "" || namespace == "" { @@ -326,7 +327,7 @@ func main() { factory.Storage().V1().StorageClasses(), factoryForNamespace.Storage().V1alpha1().CSIStorageCapacities(), *capacityPollInterval, - (*capacityFeatures)[capacity.FeatureImmediateBinding], + *capacityImmediateBinding, ) } diff --git a/pkg/capacity/features.go b/pkg/capacity/features.go deleted file mode 100644 index d7a92fd50c..0000000000 --- a/pkg/capacity/features.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package capacity - -import ( - "fmt" - "strings" - - flag "github.com/spf13/pflag" -) - -// Feature is the type for named features supported by the capacity -// controller. -type Feature string - -// Features are disabled by default. -type Features map[Feature]bool - -const ( - // FeatureCentral enables the mode where there is only one - // external-provisioner actively running in the cluster which - // talks to the CSI driver's controller. - FeatureCentral = Feature("central") - - // FeatureLocal enables the mode where external-provisioner - // is deployed on each node. Not implemented yet. - FeatureLocal = Feature("local") - - // FeatureImmediateBinding enables the publishing of information - // for storage classes with immediate binding. 
Off by default - // because normally that information is not used as the Kubernetes - // scheduler lets the driver pick a topology segment. - FeatureImmediateBinding = Feature("immediate-binding") -) - -// Set enables the named features. Multiple features can be listed, separated by commas, -// with optional whitespace. -func (features *Features) Set(value string) error { - for _, part := range strings.Split(value, ",") { - part := Feature(strings.TrimSpace(part)) - switch part { - case FeatureCentral: - if *features == nil { - *features = Features{} - } - (*features)[part] = true - case FeatureLocal: - return fmt.Errorf("%s: not implemented yet", part) - case "": - default: - return fmt.Errorf("%s: unknown feature", part) - } - } - return nil -} - -func (features *Features) String() string { - var parts []string - for feature := range *features { - parts = append(parts, string(feature)) - } - return strings.Join(parts, ",") -} - -func (features *Features) Type() string { - return "enumeration" -} - -var _ flag.Value = &Features{} diff --git a/pkg/capacity/features_test.go b/pkg/capacity/features_test.go deleted file mode 100644 index fcb6e2af1c..0000000000 --- a/pkg/capacity/features_test.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package capacity - -import ( - "reflect" - "testing" -) - -func TestFeatures(t *testing.T) { - tests := []struct { - name string - input []string - expectedOutput Features - expectedError string - }{ - { - name: "empty", - }, - { - name: "central", - input: []string{string(FeatureCentral)}, - expectedOutput: Features{FeatureCentral: true}, - }, - { - name: "local", - input: []string{string(FeatureLocal)}, - expectedError: string(FeatureLocal) + ": not implemented yet", - }, - { - name: "invalid", - input: []string{"no-such-feature"}, - expectedError: "no-such-feature: unknown feature", - }, - { - name: "multi", - input: []string{string(FeatureCentral), string(FeatureCentral)}, - expectedOutput: Features{FeatureCentral: true}, - }, - { - name: "comma", - input: []string{string(FeatureCentral) + " ," + string(FeatureCentral) + " "}, - expectedOutput: Features{FeatureCentral: true}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var actual Features - for _, value := range test.input { - err := actual.Set(value) - if err != nil && test.expectedError != "" { - if err.Error() == test.expectedError { - return - } - t.Fatalf("expected error %q, got %v", test.expectedError, err) - } - if err == nil && test.expectedError != "" { - t.Fatalf("expected error %q, got no error", test.expectedError) - } - } - if !reflect.DeepEqual(actual, test.expectedOutput) { - t.Fatalf("expected %v, got %v", test.expectedOutput, actual) - } - }) - } -} diff --git a/pkg/capacity/mode.go b/pkg/capacity/mode.go new file mode 100644 index 0000000000..20adf2cb9d --- /dev/null +++ b/pkg/capacity/mode.go @@ -0,0 +1,63 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package capacity + +import ( + "errors" + "strings" + + flag "github.com/spf13/pflag" +) + +// DeploymentMode determines how the capacity controller operates. +type DeploymentMode string + +const ( + // DeploymentModeCentral enables the mode where there is only one + // external-provisioner actively running in the cluster which + // talks to the CSI driver's controller service. + DeploymentModeCentral = DeploymentMode("central") + + // DeploymentModeLocal enables the mode where external-provisioner + // is deployed on each node. Not implemented yet. + DeploymentModeLocal = DeploymentMode("local") + + // DeploymentModeNone disables capacity support. + DeploymentModeNone = DeploymentMode("none") +) + +// Set enables the named features. Multiple features can be listed, separated by commas, +// with optional whitespace. +func (mode *DeploymentMode) Set(value string) error { + switch DeploymentMode(value) { + case DeploymentModeCentral, DeploymentModeNone: + *mode = DeploymentMode(value) + default: + return errors.New("invalid value") + } + return nil +} + +func (mode *DeploymentMode) String() string { + return string(*mode) +} + +func (mode *DeploymentMode) Type() string { + return strings.Join([]string{string(DeploymentModeCentral), string(DeploymentModeNone)}, "|") +} + +var _ flag.Value = new(DeploymentMode)
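
The following standalone sketch (illustrative only, not part of the patch) shows how such a single-value enumeration flag behaves once registered with pflag; the `deploymentMode` type merely mirrors the `DeploymentMode` value above so that the example is runnable on its own:

```go
package main

import (
	"errors"
	"fmt"

	flag "github.com/spf13/pflag"
)

// deploymentMode mirrors pkg/capacity.DeploymentMode for illustration:
// a string-backed pflag.Value that accepts a fixed set of values.
type deploymentMode string

const (
	modeCentral = deploymentMode("central")
	modeNone    = deploymentMode("none")
)

// Set rejects everything except the supported modes.
func (m *deploymentMode) Set(value string) error {
	switch deploymentMode(value) {
	case modeCentral, modeNone:
		*m = deploymentMode(value)
		return nil
	}
	return errors.New("invalid value")
}

func (m *deploymentMode) String() string { return string(*m) }
func (m *deploymentMode) Type() string   { return "central|none" }

func main() {
	mode := modeNone
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(&mode, "capacity-controller-deployment-mode", "central|none")

	// A supported value is accepted and stored in the bound variable.
	if err := fs.Parse([]string{"--capacity-controller-deployment-mode=central"}); err != nil {
		panic(err)
	}
	fmt.Println(mode) // central

	// Anything else is rejected by Set and surfaces as a parse error,
	// for example the not-yet-implemented "local" mode.
	err := fs.Parse([]string{"--capacity-controller-deployment-mode=local"})
	fmt.Println(err != nil) // true
}
```

In the provisioner binary itself, the same mechanism is what lets `--capacity-controller-deployment-mode` default to `none` and reject unsupported values during flag parsing.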