From 34b85b38b2e7ad4c49058429a72ef18734096988 Mon Sep 17 00:00:00 2001 From: Eneman Donatien Date: Fri, 23 Aug 2024 10:07:48 +0200 Subject: [PATCH] implement mulittenancy --- Dockerfile | 2 + README.md | 42 ++- api/v1alpha1/bucket_types.go | 6 +- api/v1alpha1/path_types.go | 4 + api/v1alpha1/policy_types.go | 6 +- api/v1alpha1/s3instance_types.go | 84 +++++ api/v1alpha1/s3user_types.go | 4 + api/v1alpha1/zz_generated.deepcopy.go | 103 +++++- config/crd/bases/s3.onyxia.sh_buckets.yaml | 3 + config/crd/bases/s3.onyxia.sh_paths.yaml | 3 + config/crd/bases/s3.onyxia.sh_policies.yaml | 3 + .../crd/bases/s3.onyxia.sh_s3instances.yaml | 142 ++++++++ config/crd/bases/s3.onyxia.sh_s3users.yaml | 3 + config/rbac/role.yaml | 26 ++ .../s3.onyxia.sh_v1alpha1_s3instance.yaml | 16 + controllers/bucket_controller.go | 70 +++- controllers/path_controller.go | 61 +++- controllers/policy_controller.go | 56 +++- controllers/s3instance_controller.go | 317 ++++++++++++++++++ controllers/user_controller.go | 98 ++++-- .../s3/factory/interface.go | 32 +- .../s3/factory/minioS3Client.go | 4 + .../s3/factory/mockedS3Client.go | 12 +- internal/s3/s3ClientCache.go | 70 ++++ {controllers => internal}/utils/const.go | 1 + .../utils/password/password_generator.go | 0 {controllers => internal}/utils/utils.go | 0 main.go | 88 ++--- 28 files changed, 1149 insertions(+), 107 deletions(-) create mode 100644 api/v1alpha1/s3instance_types.go create mode 100644 config/crd/bases/s3.onyxia.sh_s3instances.yaml create mode 100644 config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml create mode 100644 controllers/s3instance_controller.go rename {controllers => internal}/s3/factory/interface.go (55%) rename {controllers => internal}/s3/factory/minioS3Client.go (99%) rename {controllers => internal}/s3/factory/mockedS3Client.go (94%) create mode 100644 internal/s3/s3ClientCache.go rename {controllers => internal}/utils/const.go (78%) rename {controllers => internal}/utils/password/password_generator.go (100%) 
rename {controllers => internal}/utils/utils.go (100%) diff --git a/Dockerfile b/Dockerfile index 31c5110..d45a843 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,6 +15,8 @@ RUN go mod download COPY main.go main.go COPY api/ api/ COPY controllers/ controllers/ +COPY internal/ internal/ + # Build # the GOARCH has not a default value to allow the binary be built according to the host where the command diff --git a/README.md b/README.md index b49de8e..7873585 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ This Operator SDK based tool aims at managing S3 related resources (buckets, pol ## At a glance -- Current S3 providers : [Minio](https://github.com/InseeFrLab/s3-operator/blob/main/controllers/s3/factory/minioS3Client.go) +- Current S3 providers : [Minio](https://github.com/InseeFrLab/s3-operator/blob/main/internal/s3/factory/minioS3Client.go) - Currently managed S3 resources : [buckets](https://github.com/InseeFrLab/s3-operator/blob/main/api/v1alpha1/bucket_types.go), [policies](https://github.com/InseeFrLab/s3-operator/blob/main/api/v1alpha1/policy_types.go) ## Compatibility @@ -21,7 +21,8 @@ At its heart, the operator revolves around CRDs that match S3 resources : - `buckets.s3.onyxia.sh` - `policies.s3.onyxia.sh` - `paths.s3.onyxia.sh` -- `users.s3.onyxia.sh` +- `s3Users.s3.onyxia.sh` +- `s3Instances.s3.onyxia.sh` The custom resources based on these CRDs are a somewhat simplified projection of the real S3 resources. From the operator's point of view : @@ -29,6 +30,7 @@ The custom resources based on these CRDs are a somewhat simplified projection of - A `Policy` CR matches a "canned" policy (not a bucket policy, but a global one, that can be attached to a user), and has a name, and its actual content (IAM JSON) - A `Path` CR matches a set of paths inside of a policy. This is akin to the `paths` property of the `Bucket` CRD, except `Path` is not responsible for Bucket creation. 
- A `S3User` CR matches a user in the s3 server, and has a name, a set of policy and a set of group. +- A `S3Instance` CR matches a s3Instance. Each custom resource based on these CRDs on Kubernetes is to be matched with a resource on the S3 instance. If the CR and the corresponding S3 resource diverge, the operator will create or update the S3 resource to bring it back to. @@ -90,7 +92,7 @@ The parameters are summarized in the table below : | `path-deletion` | false | - | no | Trigger path deletion on the S3 backend upon CR deletion. Limited to deleting the `.keep` files used by the operator. | | `s3User-deletion` | false | - | no | Trigger S3User deletion on the S3 backend upon CR deletion. | | `override-existing-secret` | false | - | no | Update secret linked to s3User if already exist, else noop | - +| `s3LabelSelector` | "" | - | no | Filter resources that this instance will manage. If empty, all resources in the cluster will be managed | ## Minimal rights needed to work The Operator need at least this rights: @@ -147,6 +149,29 @@ The Operator need at least this rights: - The same will happen if you modify a CR - the operator will adjust the S3 bucket or policy accordingly - with the notable exception that it will not delete paths for buckets. - Upon deleting a CR, the corresponding bucket or policy will be left as is, as mentioned in the [*Description* section above](#description) +An instance of S3Operator can manage multiple S3 instances. On each resource created you can set where to create it. To add multiple instances of S3, see the S3Instance example. On each object deployed you can attach it to an existing s3Instance. If no instance is set on the resource, S3Operator will fall back to the default instance configured by env var. 
+ +### S3Instance example + +```yaml +apiVersion: s3.onyxia.sh/v1alpha1 +kind: S3Instance +metadata: + labels: + app.kubernetes.io/name: bucket + app.kubernetes.io/instance: bucket-sample + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: s3-operator + name: s3-default-instance # Name of the S3Instance +spec: + s3Provider: minio # Type of the Provider. Can be "mockedS3Provider" or "minio" + urlEndpoint: minio.example.com # URL of the Provider + secretName: minio-credentials # Name of the secret containing 2 Keys S3_ACCESS_KEY and S3_SECRET_KEY + region: us-east-1 # Region of the Provider + useSSL: true # useSSL to query the Provider +``` + ### Bucket example ```yaml @@ -182,6 +207,10 @@ spec: quota: default: 10000000 # override: 20000000 + + # Optional, leave empty if you have configured the default s3, else use an existing s3Instance + s3InstanceRef: "s3-default-instance" + ``` @@ -202,6 +231,9 @@ spec: # Policy name (on S3 server, as opposed to the name of the CR) name: dummy-policy + # Optional, leave empty if you have configured the default s3, else use an existing s3Instance + s3InstanceRef: "s3-default-instance" + # Content of the policy, as a multiline string # This should be IAM compliant JSON - follow the guidelines of the actual # S3 provider you're using, as sometimes only a subset is available. 
@@ -245,6 +277,8 @@ spec: - /home/alice - /home/bob + # Optional, leave empty if you have configured the default s3, else use an existing s3Instance + s3InstanceRef: "s3-default-instance" ``` @@ -266,6 +300,8 @@ spec: policies: - policy-example1 - policy-example2 + # Optional, leave empty if you have configured the default s3, else use an existing s3Instance + s3InstanceRef: "s3-default-instance" ``` diff --git a/api/v1alpha1/bucket_types.go b/api/v1alpha1/bucket_types.go index 092aac6..7f15adb 100644 --- a/api/v1alpha1/bucket_types.go +++ b/api/v1alpha1/bucket_types.go @@ -36,6 +36,10 @@ type BucketSpec struct { // +kubebuilder:validation:Optional Paths []string `json:"paths,omitempty"` + // s3InstanceRef where create the bucket + // +kubebuilder:validation:Optional + S3InstanceRef string `json:"s3InstanceRef,omitempty"` + // Quota to apply to the bucket // +kubebuilder:validation:Required Quota Quota `json:"quota"` @@ -43,7 +47,7 @@ type BucketSpec struct { // BucketStatus defines the observed state of Bucket type BucketStatus struct { - // Status management using Conditions. 
// See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } diff --git a/api/v1alpha1/path_types.go b/api/v1alpha1/path_types.go index 58f5aad..45c7ce9 100644 --- a/api/v1alpha1/path_types.go +++ b/api/v1alpha1/path_types.go @@ -35,6 +35,10 @@ type PathSpec struct { // Paths (folders) to create inside the bucket // +kubebuilder:validation:Optional Paths []string `json:"paths,omitempty"` + + // s3InstanceRef where create the Paths + // +kubebuilder:validation:Optional + S3InstanceRef string `json:"s3InstanceRef,omitempty"` } // PathStatus defines the observed state of Path diff --git a/api/v1alpha1/policy_types.go b/api/v1alpha1/policy_types.go index 6862f5e..ac7a07c 100644 --- a/api/v1alpha1/policy_types.go +++ b/api/v1alpha1/policy_types.go @@ -35,11 +35,15 @@ type PolicySpec struct { // +kubebuilder:validation:Required // Content of the policy (IAM JSON format) PolicyContent string `json:"policyContent"` + + // s3InstanceRef where create the Policy + // +kubebuilder:validation:Optional + S3InstanceRef string `json:"s3InstanceRef,omitempty"` } // PolicyStatus defines the observed state of Policy type PolicyStatus struct { - // Status management using Conditions. + // Status management using Conditions. // See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } diff --git a/api/v1alpha1/s3instance_types.go b/api/v1alpha1/s3instance_types.go new file mode 100644 index 0000000..eb03eff --- /dev/null +++ b/api/v1alpha1/s3instance_types.go @@ -0,0 +1,84 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// S3InstanceSpec defines the desired state of S3Instance +type S3InstanceSpec struct { + + // type of the S3Instance + // +kubebuilder:validation:Required + S3Provider string `json:"s3Provider"` + + // url of the S3Instance + // +kubebuilder:validation:Required + UrlEndpoint string `json:"urlEndpoint"` + + // SecretName associated to the S3Instance containing accessKey and secretKey + // +kubebuilder:validation:Required + SecretName string `json:"secretName"` + + // region associated to the S3Instance + // +kubebuilder:validation:Required + Region string `json:"region"` + + // useSSL when connecting to the S3Instance + // +kubebuilder:validation:Optional + UseSSL bool `json:"useSSL,omitempty"` + + // CaCertificatesBase64 associated to the S3InstanceUrl + // +kubebuilder:validation:Optional + CaCertificatesBase64 []string `json:"caCertificateBase64,omitempty"` +} + +// S3InstanceStatus defines the observed state of S3Instance +type S3InstanceStatus struct { + // Status management using Conditions. 
+ // See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// S3Instance is the Schema for the S3Instances API +type S3Instance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec S3InstanceSpec `json:"spec,omitempty"` + Status S3InstanceStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// S3InstanceList contains a list of S3Instance +type S3InstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []S3Instance `json:"items"` +} + +func init() { + SchemeBuilder.Register(&S3Instance{}, &S3InstanceList{}) +} diff --git a/api/v1alpha1/s3user_types.go b/api/v1alpha1/s3user_types.go index ac40da6..e116a92 100644 --- a/api/v1alpha1/s3user_types.go +++ b/api/v1alpha1/s3user_types.go @@ -37,6 +37,10 @@ type S3UserSpec struct { // SecretName associated to the S3User // +kubebuilder:validation:Optional SecretName string `json:"secretName"` + + // s3InstanceRef where create the user + // +kubebuilder:validation:Optional + S3InstanceRef string `json:"s3InstanceRef,omitempty"` } // S3UserStatus defines the observed state of S3User diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 6761cdf..46c7fe1 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -340,6 +340,107 @@ func (in *Quota) DeepCopy() *Quota { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Instance) DeepCopyInto(out *S3Instance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Instance. +func (in *S3Instance) DeepCopy() *S3Instance { + if in == nil { + return nil + } + out := new(S3Instance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *S3Instance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InstanceList) DeepCopyInto(out *S3InstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]S3Instance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceList. +func (in *S3InstanceList) DeepCopy() *S3InstanceList { + if in == nil { + return nil + } + out := new(S3InstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *S3InstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3InstanceSpec) DeepCopyInto(out *S3InstanceSpec) { + *out = *in + if in.CaCertificatesBase64 != nil { + in, out := &in.CaCertificatesBase64, &out.CaCertificatesBase64 + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceSpec. +func (in *S3InstanceSpec) DeepCopy() *S3InstanceSpec { + if in == nil { + return nil + } + out := new(S3InstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InstanceStatus) DeepCopyInto(out *S3InstanceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceStatus. +func (in *S3InstanceStatus) DeepCopy() *S3InstanceStatus { + if in == nil { + return nil + } + out := new(S3InstanceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *S3User) DeepCopyInto(out *S3User) { *out = *in @@ -439,4 +540,4 @@ func (in *S3UserStatus) DeepCopy() *S3UserStatus { out := new(S3UserStatus) in.DeepCopyInto(out) return out -} \ No newline at end of file +} diff --git a/config/crd/bases/s3.onyxia.sh_buckets.yaml b/config/crd/bases/s3.onyxia.sh_buckets.yaml index 6b2cbcd..098120e 100644 --- a/config/crd/bases/s3.onyxia.sh_buckets.yaml +++ b/config/crd/bases/s3.onyxia.sh_buckets.yaml @@ -57,6 +57,9 @@ spec: required: - default type: object + s3InstanceRef: + description: s3InstanceRef where create the bucket + type: string required: - name - quota diff --git a/config/crd/bases/s3.onyxia.sh_paths.yaml b/config/crd/bases/s3.onyxia.sh_paths.yaml index bc55aa3..c124fd0 100644 --- a/config/crd/bases/s3.onyxia.sh_paths.yaml +++ b/config/crd/bases/s3.onyxia.sh_paths.yaml @@ -43,6 +43,9 @@ spec: items: type: string type: array + s3InstanceRef: + description: s3InstanceRef where create the Paths + type: string required: - bucketName type: object diff --git a/config/crd/bases/s3.onyxia.sh_policies.yaml b/config/crd/bases/s3.onyxia.sh_policies.yaml index aa78618..aaa69a1 100644 --- a/config/crd/bases/s3.onyxia.sh_policies.yaml +++ b/config/crd/bases/s3.onyxia.sh_policies.yaml @@ -41,6 +41,9 @@ spec: policyContent: description: Content of the policy (IAM JSON format) type: string + s3InstanceRef: + description: s3InstanceRef where create the Policy + type: string required: - name - policyContent diff --git a/config/crd/bases/s3.onyxia.sh_s3instances.yaml b/config/crd/bases/s3.onyxia.sh_s3instances.yaml new file mode 100644 index 0000000..e1654f5 --- /dev/null +++ b/config/crd/bases/s3.onyxia.sh_s3instances.yaml @@ -0,0 +1,142 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: s3instances.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: S3Instance + listKind: S3InstanceList + 
plural: s3instances + singular: s3instance + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: S3Instance is the Schema for the S3Instances API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: S3InstanceSpec defines the desired state of S3Instance + properties: + caCertificateBase64: + description: CaCertificatesBase64 associated to the S3InstanceUrl + items: + type: string + type: array + region: + description: region associated to the S3Instance + type: string + s3Provider: + description: type of the S3Instance + type: string + secretName: + description: SecretName associated to the S3Instance containing accessKey + and secretKey + type: string + urlEndpoint: + description: url of the S3Instance + type: string + useSSL: + description: useSSL when connecting to the S3Instance + type: boolean + required: + - region + - s3Provider + - secretName + - urlEndpoint + type: object + status: + description: S3InstanceStatus defines the observed state of S3Instance + properties: + conditions: + description: 'Status management using Conditions. 
See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/s3.onyxia.sh_s3users.yaml b/config/crd/bases/s3.onyxia.sh_s3users.yaml index 2c46a98..17b96cf 100644 --- a/config/crd/bases/s3.onyxia.sh_s3users.yaml +++ b/config/crd/bases/s3.onyxia.sh_s3users.yaml @@ -43,6 +43,9 @@ spec: items: type: string type: array + s3InstanceRef: + description: s3InstanceRef where create the user + type: string secretName: description: SecretName associated to the S3User type: string diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 5190141..559db62 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -5,6 +5,32 @@ metadata: creationTimestamp: null name: manager-role rules: +- apiGroups: + - s3.onyxia.sh + resources: + - S3Instance + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - S3Instance/finalizers + verbs: + - update +- apiGroups: + - s3.onyxia.sh + resources: + - S3Instance/status + verbs: + - get + - patch + - update - apiGroups: - 
s3.onyxia.sh resources: diff --git a/config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml b/config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml new file mode 100644 index 0000000..198da17 --- /dev/null +++ b/config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml @@ -0,0 +1,16 @@ +apiVersion: s3.onyxia.sh/v1alpha1 +kind: S3Instance +metadata: + labels: + app.kubernetes.io/name: bucket + app.kubernetes.io/instance: bucket-sample + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: s3-operator + name: s3-default-instance +spec: + s3Provider: minio + urlEndpoint: minio.example.com + secretName: minio-credentials + region: us-east-1 + useSSL: true diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 3d6e2fd..5e88562 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -22,8 +22,10 @@ import ( "time" s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - utils "github.com/InseeFrLab/s3-operator/controllers/utils" + s3ClientCache "github.com/InseeFrLab/s3-operator/internal/s3" + "github.com/InseeFrLab/s3-operator/internal/s3/factory" + + utils "github.com/InseeFrLab/s3-operator/internal/utils" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -41,7 +43,7 @@ import ( type BucketReconciler struct { client.Client Scheme *runtime.Scheme - S3Client factory.S3Client + S3ClientCache *s3ClientCache.S3ClientCache BucketDeletion bool S3LabelSelectorValue string } @@ -93,7 +95,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // Run finalization logic for bucketFinalizer. If the // finalization logic fails, don't remove the finalizer so // that we can retry during the next reconciliation. 
- if err := r.finalizeBucket(bucketResource); err != nil { + if err := r.finalizeBucket(ctx, bucketResource); err != nil { // return ctrl.Result{}, err logger.Error(err, "an error occurred when attempting to finalize the bucket", "bucket", bucketResource.Spec.Name) // return ctrl.Result{}, err @@ -126,10 +128,17 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } } - // Bucket lifecycle management (other than deletion) starts here + // Create S3Client + s3Client, err := r.getS3InstanceForObject(ctx, bucketResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client", + "Getting s3Client in cache has failed", err) + } + // Bucket lifecycle management (other than deletion) starts here // Check bucket existence on the S3 server - found, err := r.S3Client.BucketExists(bucketResource.Spec.Name) + found, err := s3Client.BucketExists(bucketResource.Spec.Name) if err != nil { logger.Error(err, "an error occurred while checking the existence of a bucket", "bucket", bucketResource.Spec.Name) return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketExistenceCheckFailed", @@ -140,7 +149,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr if !found { // Bucket creation - err = r.S3Client.CreateBucket(bucketResource.Spec.Name) + err = s3Client.CreateBucket(bucketResource.Spec.Name) if err != nil { logger.Error(err, "an error occurred while creating a bucket", "bucket", bucketResource.Spec.Name) return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketCreationFailed", @@ -148,7 +157,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } // Setting quotas - err = r.S3Client.SetQuota(bucketResource.Spec.Name, 
bucketResource.Spec.Quota.Default) + err = s3Client.SetQuota(bucketResource.Spec.Name, bucketResource.Spec.Quota.Default) if err != nil { logger.Error(err, "an error occurred while setting a quota on a bucket", "bucket", bucketResource.Spec.Name, "quota", bucketResource.Spec.Quota.Default) return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "SetQuotaOnBucketFailed", @@ -157,7 +166,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // Path creation for _, v := range bucketResource.Spec.Paths { - err = r.S3Client.CreatePath(bucketResource.Spec.Name, v) + err = s3Client.CreatePath(bucketResource.Spec.Name, v) if err != nil { logger.Error(err, "an error occurred while creating a path on a bucket", "bucket", bucketResource.Spec.Name, "path", v) return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "CreatingPathOnBucketFailed", @@ -174,7 +183,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // its corresponding custom resource, and update it in case the CR has changed. 
// Checking effectiveQuota existence on the bucket - effectiveQuota, err := r.S3Client.GetQuota(bucketResource.Spec.Name) + effectiveQuota, err := s3Client.GetQuota(bucketResource.Spec.Name) if err != nil { logger.Error(err, "an error occurred while getting the quota for a bucket", "bucket", bucketResource.Spec.Name) return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketQuotaCheckFailed", @@ -191,7 +200,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } if effectiveQuota != quotaToResetTo { - err = r.S3Client.SetQuota(bucketResource.Spec.Name, quotaToResetTo) + err = s3Client.SetQuota(bucketResource.Spec.Name, quotaToResetTo) if err != nil { logger.Error(err, "an error occurred while resetting the quota for a bucket", "bucket", bucketResource.Spec.Name, "quotaToResetTo", quotaToResetTo) return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketQuotaUpdateFailed", @@ -207,7 +216,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // But then again, some buckets will likely be filled with many objects outside the // scope of the CR, so getting all of them might be even more costly. 
for _, pathInCr := range bucketResource.Spec.Paths { - pathExists, err := r.S3Client.PathExists(bucketResource.Spec.Name, pathInCr) + pathExists, err := s3Client.PathExists(bucketResource.Spec.Name, pathInCr) if err != nil { logger.Error(err, "an error occurred while checking a path's existence on a bucket", "bucket", bucketResource.Spec.Name, "path", pathInCr) return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketPathCheckFailed", @@ -215,7 +224,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } if !pathExists { - err = r.S3Client.CreatePath(bucketResource.Spec.Name, pathInCr) + err = s3Client.CreatePath(bucketResource.Spec.Name, pathInCr) if err != nil { logger.Error(err, "an error occurred while creating a path on a bucket", "bucket", bucketResource.Spec.Name, "path", pathInCr) return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketPathCreationFailed", @@ -249,9 +258,16 @@ func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *BucketReconciler) finalizeBucket(bucketResource *s3v1alpha1.Bucket) error { +func (r *BucketReconciler) finalizeBucket(ctx context.Context, bucketResource *s3v1alpha1.Bucket) error { + logger := log.FromContext(ctx) + + s3Client, err := r.getS3InstanceForObject(ctx, bucketResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return err + } if r.BucketDeletion { - return r.S3Client.DeleteBucket(bucketResource.Spec.Name) + return s3Client.DeleteBucket(bucketResource.Spec.Name) } return nil } @@ -278,3 +294,27 @@ func (r *BucketReconciler) SetBucketStatusConditionAndUpdate(ctx context.Context } return ctrl.Result{}, srcError } + +func (r *BucketReconciler) getS3InstanceForObject(ctx context.Context, bucketResource *s3v1alpha1.Bucket) (factory.S3Client, error) { + logger := log.FromContext(ctx) + if 
bucketResource.Spec.S3InstanceRef == "" { + logger.Info("Bucket resource doesn't have S3InstanceRef fill, failback to default instance") + s3Client, found := r.S3ClientCache.Get("default") + if !found { + err := &s3ClientCache.S3ClientCacheError{Reason: "No default client was found"} + logger.Error(err, "No default client was found") + return nil, err + } + return s3Client, nil + } else { + + logger.Info(fmt.Sprintf("Bucket resource refer to s3Instance: %s, search instance in cache", bucketResource.Spec.S3InstanceRef)) + s3Client, found := r.S3ClientCache.Get(bucketResource.Spec.S3InstanceRef) + if !found { + err := &s3ClientCache.S3ClientCacheError{Reason: fmt.Sprintf("S3InstanceRef: %s, not found in cache", bucketResource.Spec.S3InstanceRef)} + logger.Error(err, "No client was found") + return nil, err + } + return s3Client, nil + } +} diff --git a/controllers/path_controller.go b/controllers/path_controller.go index d1125ce..f05549a 100644 --- a/controllers/path_controller.go +++ b/controllers/path_controller.go @@ -21,6 +21,7 @@ import ( "fmt" "time" + s3ClientCache "github.com/InseeFrLab/s3-operator/internal/s3" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -34,15 +35,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - "github.com/InseeFrLab/s3-operator/controllers/utils" + "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "github.com/InseeFrLab/s3-operator/internal/utils" ) // PathReconciler reconciles a Path object type PathReconciler struct { client.Client Scheme *runtime.Scheme - S3Client factory.S3Client + S3ClientCache *s3ClientCache.S3ClientCache PathDeletion bool S3LabelSelectorValue string } @@ -94,7 +95,7 @@ func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. // Run finalization logic for pathFinalizer. 
If the // finalization logic fails, don't remove the finalizer so // that we can retry during the next reconciliation. - if err := r.finalizePath(pathResource); err != nil { + if err := r.finalizePath(ctx, pathResource); err != nil { // return ctrl.Result{}, err logger.Error(err, "an error occurred when attempting to finalize the path", "path", pathResource.Name) // return ctrl.Result{}, err @@ -128,10 +129,18 @@ func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. } } + // Create S3Client + s3Client, err := r.getS3InstanceForObject(ctx, pathResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client", + "Getting s3Client in cache has failed", err) + } + // Path lifecycle management (other than deletion) starts here // Check bucket existence on the S3 server - bucketFound, err := r.S3Client.BucketExists(pathResource.Spec.BucketName) + bucketFound, err := s3Client.BucketExists(pathResource.Spec.BucketName) if err != nil { logger.Error(err, "an error occurred while checking the existence of a bucket", "bucket", pathResource.Spec.BucketName) return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "BucketExistenceCheckFailed", @@ -155,7 +164,7 @@ func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. // But then again, some buckets will likely be filled with many objects outside the // scope of the CR, so getting all of them might be even more costly. 
for _, pathInCr := range pathResource.Spec.Paths { - pathExists, err := r.S3Client.PathExists(pathResource.Spec.BucketName, pathInCr) + pathExists, err := s3Client.PathExists(pathResource.Spec.BucketName, pathInCr) if err != nil { logger.Error(err, "an error occurred while checking a path's existence on a bucket", "bucket", pathResource.Spec.BucketName, "path", pathInCr) return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathCheckFailed", @@ -163,7 +172,7 @@ func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. } if !pathExists { - err = r.S3Client.CreatePath(pathResource.Spec.BucketName, pathInCr) + err = s3Client.CreatePath(pathResource.Spec.BucketName, pathInCr) if err != nil { logger.Error(err, "an error occurred while creating a path on a bucket", "bucket", pathResource.Spec.BucketName, "path", pathInCr) return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathCreationFailed", @@ -197,19 +206,25 @@ func (r *PathReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *PathReconciler) finalizePath(pathResource *s3v1alpha1.Path) error { - logger := log.Log.WithValues("controller", "path") +func (r *PathReconciler) finalizePath(ctx context.Context, pathResource *s3v1alpha1.Path) error { + logger := log.FromContext(ctx) + s3Client, err := r.getS3InstanceForObject(ctx, pathResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return err + } + if r.PathDeletion { var failedPaths []string = make([]string, 0) for _, path := range pathResource.Spec.Paths { - pathExists, err := r.S3Client.PathExists(pathResource.Spec.BucketName, path) + pathExists, err := s3Client.PathExists(pathResource.Spec.BucketName, path) if err != nil { logger.Error(err, "finalize : an error occurred while checking a path's existence on a bucket", "bucket", pathResource.Spec.BucketName, "path", path) } if 
pathExists { - err = r.S3Client.DeletePath(pathResource.Spec.BucketName, path) + err = s3Client.DeletePath(pathResource.Spec.BucketName, path) if err != nil { failedPaths = append(failedPaths, path) } @@ -245,3 +260,27 @@ func (r *PathReconciler) SetPathStatusConditionAndUpdate(ctx context.Context, pa } return ctrl.Result{}, srcError } + +func (r *PathReconciler) getS3InstanceForObject(ctx context.Context, pathResource *s3v1alpha1.Path) (factory.S3Client, error) { + logger := log.FromContext(ctx) + if pathResource.Spec.S3InstanceRef == "" { + logger.Info("Bucket resource doesn't refer to s3Instance, failback to default one") + s3Client, found := r.S3ClientCache.Get("default") + if !found { + err := &s3ClientCache.S3ClientCacheError{Reason: "No default client was found"} + logger.Error(err, "No default client was found") + return nil, err + } + return s3Client, nil + } else { + + logger.Info(fmt.Sprintf("Bucket resource doesn't refer to s3Instance: %s, search instance in cache", pathResource.Spec.S3InstanceRef)) + s3Client, found := r.S3ClientCache.Get(pathResource.Spec.S3InstanceRef) + if !found { + err := &s3ClientCache.S3ClientCacheError{Reason: fmt.Sprintf("S3InstanceRef: %s,not found in cache", pathResource.Spec.S3InstanceRef)} + logger.Error(err, "No client was found") + return nil, err + } + return s3Client, nil + } +} diff --git a/controllers/policy_controller.go b/controllers/policy_controller.go index 9ee9e3d..37c3ee8 100644 --- a/controllers/policy_controller.go +++ b/controllers/policy_controller.go @@ -37,15 +37,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - "github.com/InseeFrLab/s3-operator/controllers/utils" + s3ClientCache "github.com/InseeFrLab/s3-operator/internal/s3" + "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "github.com/InseeFrLab/s3-operator/internal/utils" ) // PolicyReconciler reconciles a 
Policy object type PolicyReconciler struct { client.Client Scheme *runtime.Scheme - S3Client factory.S3Client + S3ClientCache *s3ClientCache.S3ClientCache PolicyDeletion bool S3LabelSelectorValue string } @@ -97,7 +98,7 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // Run finalization logic for policyFinalizer. If the // finalization logic fails, don't remove the finalizer so // that we can retry during the next reconciliation. - if err := r.finalizePolicy(policyResource); err != nil { + if err := r.finalizePolicy(ctx, policyResource); err != nil { // return ctrl.Result{}, err logger.Error(err, "an error occurred when attempting to finalize the policy", "policy", policyResource.Spec.Name) // return ctrl.Result{}, err @@ -133,8 +134,16 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // Policy lifecycle management (other than deletion) starts here + // Create S3Client + s3Client, err := r.getS3InstanceForObject(ctx, policyResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client", + "Getting s3Client in cache has failed", err) + } + // Check policy existence on the S3 server - effectivePolicy, err := r.S3Client.GetPolicyInfo(policyResource.Spec.Name) + effectivePolicy, err := s3Client.GetPolicyInfo(policyResource.Spec.Name) // If the policy does not exist on S3... 
if err != nil { @@ -146,7 +155,7 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr if effectivePolicy == nil { // Policy creation using info from the CR - err = r.S3Client.CreateOrUpdatePolicy(policyResource.Spec.Name, policyResource.Spec.PolicyContent) + err = s3Client.CreateOrUpdatePolicy(policyResource.Spec.Name, policyResource.Spec.PolicyContent) if err != nil { logger.Error(err, "an error occurred while creating the policy", "policy", policyResource.Spec.Name) return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyCreationFailed", @@ -174,7 +183,7 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } // If not we update the policy to match the CR - err = r.S3Client.CreateOrUpdatePolicy(policyResource.Spec.Name, policyResource.Spec.PolicyContent) + err = s3Client.CreateOrUpdatePolicy(policyResource.Spec.Name, policyResource.Spec.PolicyContent) if err != nil { logger.Error(err, "an error occurred while updating the policy", "policy", policyResource.Spec.Name) return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyUpdateFailed", @@ -224,9 +233,15 @@ func IsPolicyMatchingWithCustomResource(policyResource *s3v1alpha1.Policy, effec return bytes.Equal(buffer.Bytes(), marshalled), nil } -func (r *PolicyReconciler) finalizePolicy(policyResource *s3v1alpha1.Policy) error { +func (r *PolicyReconciler) finalizePolicy(ctx context.Context, policyResource *s3v1alpha1.Policy) error { + logger := log.FromContext(ctx) + s3Client, err := r.getS3InstanceForObject(ctx, policyResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return err + } if r.PolicyDeletion { - return r.S3Client.DeletePolicy(policyResource.Spec.Name) + return s3Client.DeletePolicy(policyResource.Spec.Name) } return nil } @@ -253,3 +268,26 @@ func (r *PolicyReconciler) 
SetPolicyStatusConditionAndUpdate(ctx context.Context } return ctrl.Result{}, srcError } + +func (r *PolicyReconciler) getS3InstanceForObject(ctx context.Context, policyResource *s3v1alpha1.Policy) (factory.S3Client, error) { + logger := log.FromContext(ctx) + if policyResource.Spec.S3InstanceRef == "" { + logger.Info("Bucket resource doesn't refer to s3Instance, failback to default one") + s3Client, found := r.S3ClientCache.Get("default") + if !found { + err := &s3ClientCache.S3ClientCacheError{Reason: "No default client was found"} + logger.Error(err, "No default client was found") + return nil, err + } + return s3Client, nil + } else { + logger.Info(fmt.Sprintf("Bucket resource doesn't refer to s3Instance: %s, search instance in cache", policyResource.Spec.S3InstanceRef)) + s3Client, found := r.S3ClientCache.Get(policyResource.Spec.S3InstanceRef) + if !found { + err := &s3ClientCache.S3ClientCacheError{Reason: fmt.Sprintf("S3InstanceRef: %s,not found in cache", policyResource.Spec.S3InstanceRef)} + logger.Error(err, "No client was found") + return nil, err + } + return s3Client, nil + } +} diff --git a/controllers/s3instance_controller.go b/controllers/s3instance_controller.go new file mode 100644 index 0000000..c46f1b5 --- /dev/null +++ b/controllers/s3instance_controller.go @@ -0,0 +1,317 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + "reflect" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + s3ClientCache "github.com/InseeFrLab/s3-operator/internal/s3" + s3Factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + + utils "github.com/InseeFrLab/s3-operator/internal/utils" +) + +// S3InstanceReconciler reconciles a S3Instance object +type S3InstanceReconciler struct { + client.Client + Scheme *runtime.Scheme + S3ClientCache *s3ClientCache.S3ClientCache + S3LabelSelectorValue string +} + +const ( + s3InstanceFinalizer = "s3.onyxia.sh/s3InstanceFinalizer" +) + +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=S3Instance,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=S3Instance/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=S3Instance/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile
+func (r *S3InstanceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger := log.FromContext(ctx)
+
+	// Checking for s3InstanceResource existence
+	s3InstanceResource := &s3v1alpha1.S3Instance{}
+	err := r.Get(ctx, req.NamespacedName, s3InstanceResource)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			logger.Info(fmt.Sprintf("The S3InstanceResource CR %s has been removed. NOOP", req.Name))
+			return ctrl.Result{}, nil
+		}
+		logger.Error(err, "An error occurred when fetching the S3InstanceResource from Kubernetes")
+		return ctrl.Result{}, err
+	}
+
+	// Check whether this object must be managed by this operator instance
+	if r.S3LabelSelectorValue != "" {
+		labelSelectorValue, found := s3InstanceResource.Labels[utils.S3OperatorS3InstanceLabelSelectorKey]
+		if !found {
+			logger.Info("This s3Instance resource will not be managed by this operator instance because the required label selector is missing", "req.Name", req.Name, "s3Instance Labels", s3InstanceResource.Labels, "S3OperatorS3InstanceLabelSelectorKey", utils.S3OperatorS3InstanceLabelSelectorKey)
+			return ctrl.Result{}, nil
+		}
+		if labelSelectorValue != r.S3LabelSelectorValue {
+			logger.Info("This s3Instance resource will not be managed by this operator instance because it does not carry the expected labelSelector value", "req.Name", req.Name, "expected", r.S3LabelSelectorValue, "current", labelSelectorValue)
+			return ctrl.Result{}, nil
+		}
+	}
+
+	// Check if the s3InstanceResource instance is marked to be deleted, which is
+	// indicated by the deletion timestamp being set. The object will be deleted.
+	if s3InstanceResource.GetDeletionTimestamp() != nil {
+		logger.Info("s3InstanceResource has been marked for deletion")
+		return r.handleS3InstanceDeletion(ctx, s3InstanceResource)
+	}
+
+	// Add finalizer for this CR
+	if !controllerutil.ContainsFinalizer(s3InstanceResource, s3InstanceFinalizer) {
+		logger.Info("adding finalizer to s3Instance")
+
+		controllerutil.AddFinalizer(s3InstanceResource, s3InstanceFinalizer)
+		err = r.Update(ctx, s3InstanceResource)
+		if err != nil {
+			logger.Error(err, "an error occurred when adding finalizer to s3Instance", "s3Instance", s3InstanceResource.Name)
+			return r.setS3InstanceStatusConditionAndUpdate(ctx, s3InstanceResource, "OperatorFailed", metav1.ConditionFalse, "S3InstanceFinalizerAddFailed",
+				fmt.Sprintf("An error occurred when attempting to add the finalizer to s3Instance %s", s3InstanceResource.Name), err)
+		}
+	}
+
+	// Check s3Instance existence
+	_, found := r.S3ClientCache.Get(s3InstanceResource.Name)
+	// If the s3Instance does not exist, it is created based on the CR
+	if !found {
+		logger.Info("this S3Instance doesn't exist and will be created")
+		return r.handleS3InstanceCreation(ctx, s3InstanceResource)
+	}
+	logger.Info("this S3Instance already exists and will be reconciled")
+	return r.handleS3InstanceUpdate(ctx, s3InstanceResource)
+
+}
+
+func (r *S3InstanceReconciler) handleS3InstanceUpdate(ctx context.Context, s3InstanceResource *s3v1alpha1.S3Instance) (reconcile.Result, error) {
+	logger := log.FromContext(ctx)
+
+	s3Client, found := r.S3ClientCache.Get(s3InstanceResource.Name)
+	if !found {
+		err := &s3ClientCache.S3ClientCacheError{Reason: fmt.Sprintf("S3InstanceRef: %s, not found in cache", s3InstanceResource.Name)}
+		logger.Error(err, "No client was found")
+		return reconcile.Result{}, err
+	}
+	s3Config := s3Client.GetConfig()
+
+	// Get S3_ACCESS_KEY and S3_SECRET_KEY related to this s3Instance
+	s3InstanceSecretSecretExpected, err := r.getS3InstanceSecret(ctx, s3InstanceResource)
+	if err != nil {
+		logger.Error(err,
"Could not get s3InstanceSecret in namespace", "s3InstanceSecretRefName", s3InstanceResource.Spec.SecretName)
+		return r.setS3InstanceStatusConditionAndUpdate(ctx, s3InstanceResource, "OperatorFailed", metav1.ConditionFalse, "S3InstanceUpdateFailed",
+			fmt.Sprintf("Updating secret of S3Instance %s has failed", s3InstanceResource.Name), err)
+	}
+
+	// If any connection parameter changed, the cached client is stale: it is removed and recreated
+	if s3Config.S3Provider != s3InstanceResource.Spec.S3Provider || s3Config.S3UrlEndpoint != s3InstanceResource.Spec.UrlEndpoint || s3Config.UseSsl != s3InstanceResource.Spec.UseSSL || s3Config.Region != s3InstanceResource.Spec.Region || !reflect.DeepEqual(s3Config.CaCertificatesBase64, s3InstanceResource.Spec.CaCertificatesBase64) || s3Config.AccessKey != string(s3InstanceSecretSecretExpected.Data["S3_ACCESS_KEY"]) || s3Config.SecretKey != string(s3InstanceSecretSecretExpected.Data["S3_SECRET_KEY"]) {
+		logger.Info("Cached instance does not match the expected configuration, cache will be pruned and the instance recreated", "s3InstanceSecretRefName", s3InstanceResource.Spec.SecretName)
+		r.S3ClientCache.Remove(s3InstanceResource.Name)
+		return r.handleS3InstanceCreation(ctx, s3InstanceResource)
+	}
+
+	return r.setS3InstanceStatusConditionAndUpdate(ctx, s3InstanceResource, "OperatorSucceeded", metav1.ConditionTrue, "S3InstanceUpdated",
+		fmt.Sprintf("The S3Instance %s was reconciled successfully", s3InstanceResource.Name), nil)
+}
+
+func (r *S3InstanceReconciler) handleS3InstanceCreation(ctx context.Context, s3InstanceResource *s3v1alpha1.S3Instance) (reconcile.Result, error) {
+	logger := log.FromContext(ctx)
+
+	s3InstanceSecretSecret, err := r.getS3InstanceSecret(ctx, s3InstanceResource)
+	if err != nil {
+		logger.Error(err, "Could not get s3InstanceSecret in namespace", "s3InstanceSecretRefName", s3InstanceResource.Spec.SecretName)
+		return r.setS3InstanceStatusConditionAndUpdate(ctx, s3InstanceResource, "OperatorFailed",
metav1.ConditionFalse, "S3InstanceCreationFailed", + fmt.Sprintf("Getting secret of S3s3Instance %s has failed", s3InstanceResource.Name), err) + + } + + s3Config := &s3Factory.S3Config{S3Provider: s3InstanceResource.Spec.S3Provider, AccessKey: string(s3InstanceSecretSecret.Data["S3_ACCESS_KEY"]), SecretKey: string(s3InstanceSecretSecret.Data["S3_SECRET_KEY"]), S3UrlEndpoint: s3InstanceResource.Spec.UrlEndpoint, Region: s3InstanceResource.Spec.Region, UseSsl: s3InstanceResource.Spec.UseSSL, CaCertificatesBase64: s3InstanceResource.Spec.CaCertificatesBase64} + + s3Client, err := s3Factory.GenerateS3Client(s3Config.S3Provider, s3Config) + if err != nil { + return r.setS3InstanceStatusConditionAndUpdate(ctx, s3InstanceResource, "OperatorFailed", metav1.ConditionFalse, "S3InstanceCreationFailed", + fmt.Sprintf("Error while creating s3Instance %s", s3InstanceResource.Name), err) + } + + r.S3ClientCache.Set(s3InstanceResource.Name, s3Client) + + return r.setS3InstanceStatusConditionAndUpdate(ctx, s3InstanceResource, "OperatorSucceeded", metav1.ConditionTrue, "S3InstanceCreated", + fmt.Sprintf("The S3Instance %s was created successfully", s3InstanceResource.Name), nil) + +} + +func (r *S3InstanceReconciler) handleS3InstanceDeletion(ctx context.Context, s3InstanceResource *s3v1alpha1.S3Instance) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + if controllerutil.ContainsFinalizer(s3InstanceResource, s3InstanceFinalizer) { + // Run finalization logic for S3InstanceFinalizer. If the finalization logic fails, don't remove the finalizer so that we can retry during the next reconciliation. 
+ if err := r.finalizeS3Instance(ctx, s3InstanceResource); err != nil { + logger.Error(err, "an error occurred when attempting to finalize the s3Instance", "s3Instance", s3InstanceResource.Name) + return r.setS3InstanceStatusConditionAndUpdate(ctx, s3InstanceResource, "OperatorFailed", metav1.ConditionFalse, "S3InstanceFinalizeFailed", + fmt.Sprintf("An error occurred when attempting to delete s3Instance %s", s3InstanceResource.Name), err) + } + + //Remove s3InstanceFinalizer. Once all finalizers have been removed, the object will be deleted. + controllerutil.RemoveFinalizer(s3InstanceResource, s3InstanceFinalizer) + // Unsure why the behavior is different to that of bucket/policy/path controllers, but it appears + // calling r.Update() for adding/removal of finalizer is not necessary (an update event is generated + // with the call to AddFinalizer/RemoveFinalizer), and worse, causes "freshness" problem (with the + // "the object has been modified; please apply your changes to the latest version and try again" error) + err := r.Update(ctx, s3InstanceResource) + if err != nil { + logger.Error(err, "Failed to remove finalizer.") + return r.setS3InstanceStatusConditionAndUpdate(ctx, s3InstanceResource, "OperatorFailed", metav1.ConditionFalse, "S3InstanceFinalizerRemovalFailed", + fmt.Sprintf("An error occurred when attempting to remove the finalizer from s3Instance %s", s3InstanceResource.Name), err) + } + } + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager.* +func (r *S3InstanceReconciler) SetupWithManager(mgr ctrl.Manager) error { + // filterLogger := ctrl.Log.WithName("filterEvt") + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.S3Instance{}). + // The "secret owning" implies the reconcile loop will be called whenever a Secret owned + // by a S3Instance is created/updated/deleted. In other words, even when creating a single S3Instance, + // there is going to be several iterations. + Owns(&corev1.Secret{}). 
+ // See : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + + // Ignore updates to CR status in which case metadata.Generation does not change, + // unless it is a change to the underlying Secret + UpdateFunc: func(e event.UpdateEvent) bool { + + // To check if the update event is tied to a change on secret, + // we try to cast e.ObjectNew to a secret (only if it's not a S3Instance, which + // should prevent any TypeAssertionError based panic). + secretUpdate := false + newUser, _ := e.ObjectNew.(*s3v1alpha1.S3Instance) + if newUser == nil { + secretUpdate = (e.ObjectNew.(*corev1.Secret) != nil) + } + + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() || secretUpdate + }, + // Ignore create events caused by the underlying secret's creation + CreateFunc: func(e event.CreateEvent) bool { + s3Instance, _ := e.Object.(*s3v1alpha1.S3Instance) + return s3Instance != nil + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} + +func (r *S3InstanceReconciler) setS3InstanceStatusConditionAndUpdate(ctx context.Context, s3InstanceResource *s3v1alpha1.S3Instance, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // We moved away from meta.SetStatusCondition, as the implementation did not allow for updating + // lastTransitionTime if a Condition (as identified by Reason instead of Type) was previously + // obtained and updated to again. 
+ s3InstanceResource.Status.Conditions = utils.UpdateConditions(s3InstanceResource.Status.Conditions, metav1.Condition{ + Type: conditionType, + Status: status, + Reason: reason, + LastTransitionTime: metav1.NewTime(time.Now()), + Message: message, + ObservedGeneration: s3InstanceResource.GetGeneration(), + }) + + err := r.Status().Update(ctx, s3InstanceResource) + if err != nil { + logger.Error(err, "an error occurred while updating the status of the S3Instance resource") + return ctrl.Result{}, utilerrors.NewAggregate([]error{err, srcError}) + } + return ctrl.Result{}, srcError +} + +func (r *S3InstanceReconciler) finalizeS3Instance(ctx context.Context, s3InstanceResource *s3v1alpha1.S3Instance) error { + logger := log.FromContext(ctx) + // Create S3Client + logger.Info(fmt.Sprintf("Search S3Instance %s to delete in cache , search instance in cache", s3InstanceResource.Name)) + _, found := r.S3ClientCache.Get(s3InstanceResource.Name) + if !found { + err := &s3ClientCache.S3ClientCacheError{Reason: fmt.Sprintf("S3InstanceRef: %s,not found in cache cannot finalize", s3InstanceResource.Name)} + logger.Error(err, "No client was found") + return err + } + r.S3ClientCache.Remove(s3InstanceResource.Name) + return nil +} + +func (r *S3InstanceReconciler) getS3InstanceSecret(ctx context.Context, s3InstanceResource *s3v1alpha1.S3Instance) (corev1.Secret, error) { + logger := log.FromContext(ctx) + + secretsList := &corev1.SecretList{} + s3InstanceSecret := corev1.Secret{} + + err := r.List(ctx, secretsList, client.InNamespace(s3InstanceResource.Namespace)) + if err != nil { + logger.Error(err, "An error occurred while listing the secrets in s3instance's namespace") + return s3InstanceSecret, fmt.Errorf("SecretListingFailed") + } + + if len(secretsList.Items) == 0 { + logger.Info("The s3instance's namespace doesn't appear to contain any secret") + return s3InstanceSecret, nil + } + // In all the secrets inside the s3instance's namespace, one should have a name equal to + // 
the S3InstanceSecretRefName field. + s3InstanceSecretName := s3InstanceResource.Spec.SecretName + + // cmp.Or takes the first non "zero" value, see https://pkg.go.dev/cmp#Or + for _, secret := range secretsList.Items { + if secret.Name == s3InstanceSecretName { + s3InstanceSecret = secret + break + } + } + + return s3InstanceSecret, nil +} diff --git a/controllers/user_controller.go b/controllers/user_controller.go index 085a13b..c5bd4d8 100644 --- a/controllers/user_controller.go +++ b/controllers/user_controller.go @@ -39,16 +39,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - utils "github.com/InseeFrLab/s3-operator/controllers/utils" - password "github.com/InseeFrLab/s3-operator/controllers/utils/password" + s3ClientCache "github.com/InseeFrLab/s3-operator/internal/s3" + "github.com/InseeFrLab/s3-operator/internal/s3/factory" + utils "github.com/InseeFrLab/s3-operator/internal/utils" + password "github.com/InseeFrLab/s3-operator/internal/utils/password" ) // S3UserReconciler reconciles a S3User object type S3UserReconciler struct { client.Client Scheme *runtime.Scheme - S3Client factory.S3Client + S3ClientCache *s3ClientCache.S3ClientCache S3UserDeletion bool OverrideExistingSecret bool S3LabelSelectorValue string @@ -116,7 +117,16 @@ func (r *S3UserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } // Check user existence on the S3 server - found, err := r.S3Client.UserExist(userResource.Spec.AccessKey) + + // Create S3Client + s3Client, err := r.getS3InstanceForObject(ctx, userResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client", + "Getting s3Client in cache has failed", err) + } + + found, err := s3Client.UserExist(userResource.Spec.AccessKey) if err 
!= nil { logger.Error(err, "an error occurred while checking the existence of a user", "user", userResource.Name) return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserExistenceCheckFailed", @@ -135,7 +145,13 @@ func (r *S3UserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResource *s3v1alpha1.S3User) (reconcile.Result, error) { logger := log.FromContext(ctx) - + // Create S3Client + s3Client, err := r.getS3InstanceForObject(ctx, userResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client", + "Getting s3Client in cache has failed", err) + } // --- Begin Secret management section userOwnedSecret, err := r.getUserSecret(ctx, userResource) @@ -144,7 +160,7 @@ func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResourc logger.Error(err, "An error occurred when trying to obtain the user's secret. 
The user will be deleted from S3 backend and recreated with a secret.") r.deleteSecret(ctx, &userOwnedSecret) - err = r.S3Client.DeleteUser(userResource.Spec.AccessKey) + err = s3Client.DeleteUser(userResource.Spec.AccessKey) if err != nil { logger.Error(err, "Could not delete user on S3 server", "user", userResource.Name) return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserDeletionFailed", @@ -159,7 +175,7 @@ func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResourc if userOwnedSecret.Name == "" { logger.Info("Secret associated to user not found, user will be deleted from the S3 backend, then recreated with a secret") - err = r.S3Client.DeleteUser(userResource.Spec.AccessKey) + err = s3Client.DeleteUser(userResource.Spec.AccessKey) if err != nil { logger.Error(err, "Could not delete user on S3 server", "user", userResource.Name) return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserDeletionFailed", @@ -171,7 +187,7 @@ func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResourc // If a matching secret is found, then we check if it is still valid, as in : do the credentials it // contains still allow authenticating the S3User on the backend ? If not, the user is deleted and recreated. 
// credentialsValid, err := r.S3Client.CheckUserCredentialsValid(userResource.Name, userResource.Spec.AccessKey, string(userOwnedSecret.Data["secretKey"])) - credentialsValid, err := r.S3Client.CheckUserCredentialsValid(userResource.Name, string(userOwnedSecret.Data["accessKey"]), string(userOwnedSecret.Data["secretKey"])) + credentialsValid, err := s3Client.CheckUserCredentialsValid(userResource.Name, string(userOwnedSecret.Data["accessKey"]), string(userOwnedSecret.Data["secretKey"])) if err != nil { logger.Error(err, "An error occurred when checking if user credentials were valid", "user", userResource.Name) return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserCredentialsCheckFailed", @@ -181,7 +197,7 @@ func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResourc if !credentialsValid { logger.Info("The secret containing the credentials will be deleted, and the user will be deleted from the S3 backend, then recreated (through another reconcile)") r.deleteSecret(ctx, &userOwnedSecret) - err = r.S3Client.DeleteUser(userResource.Spec.AccessKey) + err = s3Client.DeleteUser(userResource.Spec.AccessKey) if err != nil { logger.Error(err, "Could not delete user on S3 server", "user", userResource.Name) return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserDeletionFailed", @@ -195,7 +211,7 @@ func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResourc // --- End Secret management section logger.Info("Checking user policies") - userPolicies, err := r.S3Client.GetUserPolicies(userResource.Spec.AccessKey) + userPolicies, err := s3Client.GetUserPolicies(userResource.Spec.AccessKey) if err != nil { logger.Error(err, "Could not check the user's policies") return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyCheckFailed", @@ -221,7 +237,7 @@ func (r 
*S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResourc } if len(policyToDelete) > 0 { - err = r.S3Client.RemovePoliciesFromUser(userResource.Spec.AccessKey, policyToDelete) + err = s3Client.RemovePoliciesFromUser(userResource.Spec.AccessKey, policyToDelete) if err != nil { logger.Error(err, "an error occurred while removing policy to user", "user", userResource.Name) return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", @@ -230,7 +246,7 @@ func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResourc } if len(policyToAdd) > 0 { - err := r.S3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policyToAdd) + err := s3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policyToAdd) if err != nil { logger.Error(err, "an error occurred while adding policy to user", "user", userResource.Name) return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", @@ -254,7 +270,13 @@ func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResourc func (r *S3UserReconciler) handleS3NewUser(ctx context.Context, userResource *s3v1alpha1.S3User) (reconcile.Result, error) { logger := log.FromContext(ctx) - + // Create S3Client + s3Client, err := r.getS3InstanceForObject(ctx, userResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client", + "Getting s3Client in cache has failed", err) + } // Generating a random secret key secretKey, err := password.Generate(20, true, false, true) if err != nil { @@ -282,7 +304,7 @@ func (r *S3UserReconciler) handleS3NewUser(ctx context.Context, userResource *s3 logger.Info("No secret found ; creating a new Secret", "Secret.Namespace", secret.Namespace, "Secret.Name", secret.Name) // Creating the user 
- err = r.S3Client.CreateUser(userResource.Spec.AccessKey, secretKey) + err = s3Client.CreateUser(userResource.Spec.AccessKey, secretKey) if err != nil { logger.Error(err, "an error occurred while creating user on S3 server", "user", userResource.Name) @@ -332,7 +354,7 @@ func (r *S3UserReconciler) handleS3NewUser(ctx context.Context, userResource *s3 logger.Info(fmt.Sprintf("A secret with the name %s already exists ; it will be overwritten as per operator configuration", secret.Name)) // Creating the user - err = r.S3Client.CreateUser(userResource.Spec.AccessKey, secretKey) + err = s3Client.CreateUser(userResource.Spec.AccessKey, secretKey) if err != nil { logger.Error(err, "an error occurred while creating user on S3 server", "user", userResource.Name) @@ -372,9 +394,15 @@ func (r *S3UserReconciler) handleS3NewUser(ctx context.Context, userResource *s3 func (r *S3UserReconciler) addPoliciesToUser(ctx context.Context, userResource *s3v1alpha1.S3User) error { logger := log.FromContext(ctx) + // Create S3Client + s3Client, err := r.getS3InstanceForObject(ctx, userResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return err + } policies := userResource.Spec.Policies if policies != nil { - err := r.S3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policies) + err := s3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policies) if err != nil { logger.Error(err, "an error occurred while adding policy to user", "user", userResource.Name) return err @@ -388,7 +416,7 @@ func (r *S3UserReconciler) handleS3UserDeletion(ctx context.Context, userResourc if controllerutil.ContainsFinalizer(userResource, userFinalizer) { // Run finalization logic for S3UserFinalizer. If the finalization logic fails, don't remove the finalizer so that we can retry during the next reconciliation. 
- if err := r.finalizeS3User(userResource); err != nil { + if err := r.finalizeS3User(ctx, userResource); err != nil { logger.Error(err, "an error occurred when attempting to finalize the user", "user", userResource.Name) return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserFinalizeFailed", fmt.Sprintf("An error occurred when attempting to delete user %s", userResource.Name), err) @@ -526,9 +554,16 @@ func (r *S3UserReconciler) setS3UserStatusConditionAndUpdate(ctx context.Context return ctrl.Result{}, srcError } -func (r *S3UserReconciler) finalizeS3User(userResource *s3v1alpha1.S3User) error { +func (r *S3UserReconciler) finalizeS3User(ctx context.Context, userResource *s3v1alpha1.S3User) error { + logger := log.FromContext(ctx) + // Create S3Client + s3Client, err := r.getS3InstanceForObject(ctx, userResource) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return err + } if r.S3UserDeletion { - return r.S3Client.DeleteUser(userResource.Spec.AccessKey) + return s3Client.DeleteUser(userResource.Spec.AccessKey) } return nil } @@ -571,3 +606,26 @@ func (r *S3UserReconciler) newSecretForCR(ctx context.Context, userResource *s3v return secret, nil } + +func (r *S3UserReconciler) getS3InstanceForObject(ctx context.Context, userResource *s3v1alpha1.S3User) (factory.S3Client, error) { + logger := log.FromContext(ctx) + if userResource.Spec.S3InstanceRef == "" { + logger.Info("S3User resource doesn't refer to an s3Instance, falling back to the default one") + s3Client, found := r.S3ClientCache.Get("default") + if !found { + err := &s3ClientCache.S3ClientCacheError{Reason: "No default client was found"} + logger.Error(err, "No default client was found") + return nil, err + } + return s3Client, nil + } else { + logger.Info(fmt.Sprintf("S3User resource refers to s3Instance: %s, searching instance in cache", userResource.Spec.S3InstanceRef)) + s3Client, found :=
r.S3ClientCache.Get(userResource.Spec.S3InstanceRef) + if !found { + err := &s3ClientCache.S3ClientCacheError{Reason: fmt.Sprintf("S3InstanceRef: %s, not found in cache", userResource.Spec.S3InstanceRef)} + logger.Error(err, "No client was found") + return nil, err + } + return s3Client, nil + } +} diff --git a/controllers/s3/factory/interface.go b/internal/s3/factory/interface.go similarity index 55% rename from controllers/s3/factory/interface.go rename to internal/s3/factory/interface.go index e8bf10f..dbd14b2 100644 --- a/controllers/s3/factory/interface.go +++ b/internal/s3/factory/interface.go @@ -2,6 +2,7 @@ package factory import ( "fmt" + "os" "github.com/minio/madmin-go/v3" @@ -35,6 +36,7 @@ type S3Client interface { GetUserPolicies(name string) ([]string, error) AddPoliciesToUser(accessKey string, policies []string) error RemovePoliciesFromUser(accessKey string, policies []string) error + GetConfig() *S3Config } type S3Config struct { @@ -48,7 +50,7 @@ type S3Config struct { CaBundlePath string } -func GetS3Client(s3Provider string, S3Config *S3Config) (S3Client, error) { +func GenerateS3Client(s3Provider string, S3Config *S3Config) (S3Client, error) { if s3Provider == "mockedS3Provider" { return newMockedS3Client(), nil } @@ -57,3 +59,31 @@ func GetS3Client(s3Provider string, S3Config *S3Config) (S3Client, error) { } return nil, fmt.Errorf("s3 provider " + s3Provider + "not supported") } + +func GenerateDefaultS3Client(s3Provider string, s3UrlEndpoint string, accessKey string, secretKey string, region string, useSsl bool, caCertificatesBase64 []string, caBundlePath string) (S3Client, error) { + // For S3 access key and secret key, we first try to read the values from environment variables. + // Only if these are not defined do we use the respective flags.
+ + var accessKeyFromEnvIfAvailable = os.Getenv("S3_ACCESS_KEY") + if accessKeyFromEnvIfAvailable == "" { + accessKeyFromEnvIfAvailable = accessKey + } + var secretKeyFromEnvIfAvailable = os.Getenv("S3_SECRET_KEY") + if secretKeyFromEnvIfAvailable == "" { + secretKeyFromEnvIfAvailable = secretKey + } + + if s3Provider == "" || s3UrlEndpoint == "" || accessKeyFromEnvIfAvailable == "" || secretKeyFromEnvIfAvailable == "" { + s3Logger.Info("No default S3Client to create") + return nil, nil + } + + if s3Provider == "mockedS3Provider" { + return newMockedS3Client(), nil + } + if s3Provider == "minio" { + S3Config := &S3Config{S3Provider: s3Provider, S3UrlEndpoint: s3UrlEndpoint, Region: region, AccessKey: accessKeyFromEnvIfAvailable, SecretKey: secretKeyFromEnvIfAvailable, UseSsl: useSsl, CaCertificatesBase64: caCertificatesBase64, CaBundlePath: caBundlePath} + return newMinioS3Client(S3Config), nil + } + return nil, fmt.Errorf("s3 provider " + s3Provider + "not supported") +} diff --git a/controllers/s3/factory/minioS3Client.go b/internal/s3/factory/minioS3Client.go similarity index 99% rename from controllers/s3/factory/minioS3Client.go rename to internal/s3/factory/minioS3Client.go index fb307d7..d1e51ee 100644 --- a/controllers/s3/factory/minioS3Client.go +++ b/internal/s3/factory/minioS3Client.go @@ -394,3 +394,7 @@ func (minioS3Client *MinioS3Client) AddPoliciesToUser(accessKey string, policies } return nil } + +func (minioS3Client *MinioS3Client) GetConfig() *S3Config { + return &minioS3Client.s3Config +} diff --git a/controllers/s3/factory/mockedS3Client.go b/internal/s3/factory/mockedS3Client.go similarity index 94% rename from controllers/s3/factory/mockedS3Client.go rename to internal/s3/factory/mockedS3Client.go index 96770fb..bd6703f 100644 --- a/controllers/s3/factory/mockedS3Client.go +++ b/internal/s3/factory/mockedS3Client.go @@ -4,7 +4,9 @@ import ( "github.com/minio/madmin-go/v3" ) -type MockedS3Client struct{} +type MockedS3Client struct { + s3Config 
S3Config +} func (mockedS3Provider *MockedS3Client) BucketExists(name string) (bool, error) { s3Logger.Info("checking bucket existence", "bucket", name) @@ -76,13 +78,11 @@ func (mockedS3Provider *MockedS3Client) PolicyExist(name string) (bool, error) { return true, nil } - func (mockedS3Provider *MockedS3Client) AddPoliciesToUser(username string, policies []string) error { s3Logger.Info("Adding policies to user", "user", username, "policies", policies) return nil } - func (mockedS3Provider *MockedS3Client) DeletePolicy(name string) error { s3Logger.Info("delete policy", "policy", name) return nil @@ -108,6 +108,10 @@ func (mockedS3Provider *MockedS3Client) RemovePoliciesFromUser(username string, return nil } +func (mockedS3Provider *MockedS3Client) GetConfig() *S3Config { + return &mockedS3Provider.s3Config +} + func newMockedS3Client() *MockedS3Client { - return &MockedS3Client{} + return &MockedS3Client{s3Config: S3Config{}} } diff --git a/internal/s3/s3ClientCache.go b/internal/s3/s3ClientCache.go new file mode 100644 index 0000000..e8b5851 --- /dev/null +++ b/internal/s3/s3ClientCache.go @@ -0,0 +1,70 @@ +package S3ClientCache + +import ( + "fmt" + "sync" + + "github.com/InseeFrLab/s3-operator/internal/s3/factory" +) + +// Cache is a basic in-memory key-value cache implementation. +type S3ClientCache struct { + items map[string]factory.S3Client // The map storing key-value pairs. + mu sync.Mutex // Mutex for controlling concurrent access to the cache. +} + +// New creates a new Cache instance. +func New() *S3ClientCache { + return &S3ClientCache{ + items: make(map[string]factory.S3Client), + } +} + +// Set adds or updates a key-value pair in the cache. +func (c *S3ClientCache) Set(key string, value factory.S3Client) { + c.mu.Lock() + defer c.mu.Unlock() + + c.items[key] = value +} + +// Get retrieves the value associated with the given key from the cache. The bool +// return value will be false if no matching key is found, and true otherwise. 
+func (c *S3ClientCache) Get(key string) (factory.S3Client, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + value, found := c.items[key] + return value, found +} + +// Remove deletes the key-value pair with the specified key from the cache. +func (c *S3ClientCache) Remove(key string) { + c.mu.Lock() + defer c.mu.Unlock() + + delete(c.items, key) +} + +// Pop removes and returns the value associated with the specified key from the cache. +func (c *S3ClientCache) Pop(key string) (factory.S3Client, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + value, found := c.items[key] + + // If the key is found, delete the key-value pair from the cache. + if found { + delete(c.items, key) + } + + return value, found +} + +type S3ClientCacheError struct { + Reason string +} + +func (r *S3ClientCacheError) Error() string { + return fmt.Sprintf("%s", r.Reason) +} diff --git a/controllers/utils/const.go b/internal/utils/const.go similarity index 78% rename from controllers/utils/const.go rename to internal/utils/const.go index 6c926a8..e984f72 100644 --- a/controllers/utils/const.go +++ b/internal/utils/const.go @@ -4,3 +4,4 @@ const S3OperatorBucketLabelSelectorKey = "s3operator.bucket.managed-by" const S3OperatorPathLabelSelectorKey = "s3operator.path.managed-by" const S3OperatorPolicyLabelSelectorKey = "s3operator.policy.managed-by" const S3OperatorUserLabelSelectorKey = "s3operator.user.managed-by" +const S3OperatorS3InstanceLabelSelectorKey = "s3operator.s3Instance.managed-by" diff --git a/controllers/utils/password/password_generator.go b/internal/utils/password/password_generator.go similarity index 100% rename from controllers/utils/password/password_generator.go rename to internal/utils/password/password_generator.go diff --git a/controllers/utils/utils.go b/internal/utils/utils.go similarity index 100% rename from controllers/utils/utils.go rename to internal/utils/utils.go diff --git a/main.go b/main.go index 742ba5d..e48edab 100644 --- a/main.go +++ b/main.go @@ -27,7 
+27,9 @@ import ( s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" controllers "github.com/InseeFrLab/s3-operator/controllers" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" + s3ClientCache "github.com/InseeFrLab/s3-operator/internal/s3" + "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -152,67 +154,71 @@ func main() { os.Exit(1) } - // For S3 access key and secret key, we first try to read the values from environment variables. - // Only if these are not defined do we use the respective flags. - var accessKeyFromEnvIfAvailable = os.Getenv("S3_ACCESS_KEY") - if accessKeyFromEnvIfAvailable == "" { - accessKeyFromEnvIfAvailable = accessKey - } - var secretKeyFromEnvIfAvailable = os.Getenv("S3_SECRET_KEY") - if secretKeyFromEnvIfAvailable == "" { - secretKeyFromEnvIfAvailable = secretKey - } + s3ClientCache := s3ClientCache.New() + + // Creation of the default S3 client + s3DefaultClient, err := factory.GenerateDefaultS3Client(s3Provider, s3EndpointUrl, accessKey, secretKey, region, useSsl, caCertificatesBase64, caCertificatesBundlePath) - // Creation of the S3 client - s3Config := &factory.S3Config{S3Provider: s3Provider, S3UrlEndpoint: s3EndpointUrl, Region: region, AccessKey: accessKeyFromEnvIfAvailable, SecretKey: secretKeyFromEnvIfAvailable, UseSsl: useSsl, CaCertificatesBase64: caCertificatesBase64, CaBundlePath: caCertificatesBundlePath} - s3Client, err := factory.GetS3Client(s3Config.S3Provider, s3Config) if err != nil { // setupLog.Log.Error(err, err.Error()) // fmt.Print(s3Client) // fmt.Print(err) - setupLog.Error(err, "an error occurred while creating the S3 client", "s3Client", s3Client) + setupLog.Error(err, "an error occurred while creating the S3 client", "s3Client", s3DefaultClient) os.Exit(1) } + if s3DefaultClient != nil { + s3ClientCache.Set("default", s3DefaultClient) + } + if err = 
(&controllers.BucketReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - S3Client: s3Client, + S3ClientCache: s3ClientCache, BucketDeletion: bucketDeletion, S3LabelSelectorValue: s3LabelSelector, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Bucket") os.Exit(1) } - if err = (&controllers.PathReconciler{ + // if err = (&controllers.PathReconciler{ + // Client: mgr.GetClient(), + // Scheme: mgr.GetScheme(), + // S3ClientCache: s3ClientCache, + // PathDeletion: pathDeletion, + // S3LabelSelectorValue: s3LabelSelector, + // }).SetupWithManager(mgr); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "Path") + // os.Exit(1) + // } + // if err = (&controllers.PolicyReconciler{ + // Client: mgr.GetClient(), + // Scheme: mgr.GetScheme(), + // S3ClientCache: s3ClientCache, + // PolicyDeletion: policyDeletion, + // S3LabelSelectorValue: s3LabelSelector, + // }).SetupWithManager(mgr); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "Policy") + // os.Exit(1) + // } + // if err = (&controllers.S3UserReconciler{ + // Client: mgr.GetClient(), + // Scheme: mgr.GetScheme(), + // S3ClientCache: s3ClientCache, + // S3UserDeletion: s3userDeletion, + // OverrideExistingSecret: overrideExistingSecret, + // S3LabelSelectorValue: s3LabelSelector, + // }).SetupWithManager(mgr); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "S3User") + // os.Exit(1) + // } + if err = (&controllers.S3InstanceReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - S3Client: s3Client, - PathDeletion: pathDeletion, + S3ClientCache: s3ClientCache, S3LabelSelectorValue: s3LabelSelector, }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Path") - os.Exit(1) - } - if err = (&controllers.PolicyReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - 
PolicyDeletion: policyDeletion, - S3LabelSelectorValue: s3LabelSelector, - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Policy") - os.Exit(1) - } - if err = (&controllers.S3UserReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - S3UserDeletion: s3userDeletion, - OverrideExistingSecret: overrideExistingSecret, - S3LabelSelectorValue: s3LabelSelector, - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "S3User") + setupLog.Error(err, "unable to create controller", "controller", "S3Instance") os.Exit(1) } //+kubebuilder:scaffold:builder