diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..65ff66e --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,19 @@ +"jobs": + "build": + "name": "Deploy docs" + "runs-on": "ubuntu-latest" + "steps": + - "name": "Checkout main" + "uses": "actions/checkout@v3" + "with": + "fetch-depth": 0 + - "uses": "actions/setup-python@v2" + - "run": "pip install -r requirements.txt" + - "run": "git config user.name 'github-actions[bot]' && git config user.email 'github-actions[bot]@users.noreply.github.com'" + - "name": "Publish docs" + "run": "mkdocs gh-deploy --force" +"name": "Publish docs via GitHub Pages" +"on": + "push": + "branches": + - "main" \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..985645d --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Grafana Labs, sh0rez, Duologic + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
diff --git a/README.md b/README.md index d65bcc5..f609426 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,5 @@ -# crossplane-core-libsonnet -crossplane-core jsonnet library +# crossplane-core Jsonnet library + +This library is generated with [`k8s`](https://github.com/jsonnet-libs/k8s). + +[Docs](https://jsonnet-libs.github.io/crossplane-core-libsonnet)
diff --git a/crossplane/1.17/_custom/compositeResourceDefinition.libsonnet b/crossplane/1.17/_custom/compositeResourceDefinition.libsonnet new file mode 100644 index 0000000..b2eb9c1 --- /dev/null +++ b/crossplane/1.17/_custom/compositeResourceDefinition.libsonnet @@ -0,0 +1,78 @@ +local d = import 'doc-util/main.libsonnet'; + +{ + apiextensions+: { + v1+: { + xrd: self.compositeResourceDefinition, + compositeResourceDefinition+: { + + '#new':: d.fn(help=||| + new returns an instance of CompositeResourceDefinition. + + For example: xpostgresqlinstances.example.org + + - `kind`: XPostgreSQLInstance + - `plural`: xpostgresqlinstances + - `group`: example.org + + A common convention is that XRs (composite resources) are prefixed with 'X' + while claim names are not. This lets app team members think of creating a claim + as (e.g.) 'creating a PostgreSQLInstance'. Use `withClaimNames` to set this. + |||, args=[ + d.arg('kind', d.T.string), + d.arg('plural', d.T.string), + d.arg('group', d.T.string), + ]), + new(kind, plural, group): + super.new(plural + '.' + group) + + super.metadata.withAnnotations({ + // Tell Tanka to not set metadata.namespace. + 'tanka.dev/namespaced': 'false', + }) + + super.spec.withGroup(group) + + super.spec.names.withKind(kind) + + super.spec.names.withPlural(plural) + , + + '#withClaimNames':: d.fn(help=||| + Sets the ClaimNames attribute. + + Example: + - `kind`: PostgreSQLInstance + - `plural`: postgresqlinstances + + A common convention is that XRs (composite resources) are prefixed with 'X' + while claim names are not. This lets app team members think of creating a claim + as (e.g.) 'creating a PostgreSQLInstance'. + |||, args=[ + d.arg('kind', d.T.string), + d.arg('plural', d.T.string), + ]), + withClaimNames(kind, plural): + super.spec.claimNames.withKind(kind) + + super.spec.claimNames.withPlural(plural), + + '#mapVersions':: d.fn(help=||| + mapVersions applies the function `f` to each version in `spec.versions` of + the XRD, returning the XRD with the mapped versions.
+ |||, args=[ + d.arg('f', d.T.func), + ]), + mapVersions(f): { + local versions = super.spec.versions, + spec+: { + versions: std.map(f, versions), + }, + }, + }, + }, + }, +}
diff --git a/crossplane/1.17/_custom/composition.libsonnet b/crossplane/1.17/_custom/composition.libsonnet new file mode 100644 index 0000000..bdcad67 --- /dev/null +++ b/crossplane/1.17/_custom/composition.libsonnet @@ -0,0 +1,54 @@ +local d = import 'doc-util/main.libsonnet'; + +{ + apiextensions+: { + v1+: { + composition+: { + '#fromXRD':: d.fn(help=||| + Create a Composition based on an XRD. + + Attributes: + - `name` of the composition + - `namespace` where connectionDetails are propagated to, commonly the + management namespace (i.e. crossplane) + - `provider` of the resources in this composition + - `xrdRef` XRD object with which this composition is compatible + - `xrdVersion` Version of the XRD object with which this composition is compatible + |||, args=[ + d.arg('name', d.T.string), + d.arg('namespace', d.T.string), + d.arg('provider', d.T.string), + d.arg('xrdRef', d.T.object), + d.arg('xrdVersion', d.T.string), + ]), + fromXRD(name, namespace, provider, xrdRef, xrdVersion): + super.new(name) + + super.metadata.withAnnotations({ + // Tell Tanka to not set metadata.namespace. + 'tanka.dev/namespaced': 'false', + }) + + super.metadata.withLabels({ + // An optional convention is to include a label of the XRD. This allows easy + // discovery of compatible Compositions. + 'crossplane.io/xrd': xrdRef.metadata.name, + // Another optional convention is to include a label of the (most common) + // provider for the resource(s) in this composition. This label can be used in + // 'compositionSelector' in an XR or Claim. + provider: provider, + }) + + // Each Composition must declare that it is compatible with a particular type of + // Composite Resource using its 'compositeTypeRef' field. The referenced + // version must be marked 'referenceable' in the XRD that defines the XR. + + super.spec.compositeTypeRef.withApiVersion(xrdRef.spec.group + '/' + xrdVersion) + + super.spec.compositeTypeRef.withKind(xrdRef.spec.names.kind) + + + // When an XR is created in response to a claim Crossplane needs to know where it + // should create the XR's connection secret. This is configured using the + // 'writeConnectionSecretsToNamespace' field.
+ + super.spec.withWriteConnectionSecretsToNamespace(namespace), + }, + }, + }, +}
diff --git a/crossplane/1.17/_custom/resource.libsonnet b/crossplane/1.17/_custom/resource.libsonnet new file mode 100644 index 0000000..e5c85dc --- /dev/null +++ b/crossplane/1.17/_custom/resource.libsonnet @@ -0,0 +1,640 @@ +local d = import 'doc-util/main.libsonnet'; +{ + local this = self, + + util+: { + '#':: d.pkg(name='util', url='', help='A util library for Crossplane.'), + + resource: { + '#':: d.pkg(name='resource', url='', help='Create resources for Compositions.'), + + '#new':: d.fn(help=||| + Create a new instance of a resource for a Composition, where `name` is a unique + name for the resource within the Composition resource list and `resource` is + the jsonnet library for this resource (for example: + `(import 'provider-sql/0.4/main.libsonnet').mysql.v1alpha1.database`) + |||, args=[ + d.arg('name', d.T.string), + d.arg('resource', d.T.object), + ]), + new(name, resource): + { + // Unique name within the composition + name: name, + // Expose the resource internally for use in withBaseMixin + resource:: resource, + } + + self.withBaseMixin( + function(resource) + // Call new to set apiVersion/Kind + resource.new('') + // Remove metadata as this is not relevant for a composition + + { metadata:: '' }, + ), + + '#withBaseMixin':: d.fn(help=||| + Extend the resource base; this expects a function with the resource library as its + argument: + ``` + + util.resource.withBaseMixin( + function(resource) resource.spec.withAttribute('value'), + ) + ``` + |||, args=[ + d.arg('baseFunc', d.T.func), + ]), + withBaseMixin(baseFunc):: { + base+: baseFunc(self.resource), + }, + + '#withPatchesMixin':: d.fn(help=||| + Add patches that can be applied to this resource. + |||, args=[ + d.arg('patches', d.T.array), + ]), + withPatchesMixin(patches):: { + patches+: patches, + }, + + '#withConnectionDetailsMixin':: d.fn(help=||| + Add connectionDetails that will be provided by this resource. + + This also configures the writeConnectionSecretToRef to properly propagate the + connectionDetails. The `namespace` attribute conventionally matches the Crossplane + system namespace. + |||, args=[ + d.arg('namespace', d.T.string), + d.arg('connectionDetails', d.T.array), + ]), + withConnectionDetailsMixin(namespace, connectionDetails):: { + local resource = super.resource, + local m = { resource:: resource } + this.util.resource.withConnectionSecretMixin(super.name, namespace), + base+: m.base, + patches+: m.patches, + + connectionDetails+: connectionDetails, + }, + + // Here are a few common base/patch combinations + + '#withConnectionSecretMixin':: d.fn(help=||| + withConnectionSecretMixin ensures connectionSecrets are propagated to the + management (i.e. crossplane) namespace; the name of the secret will be + <metadata.uid>-<suffix>. + |||, args=[ + d.arg('suffix', d.T.string), + d.arg('namespace', d.T.string), + ]), + withConnectionSecretMixin(suffix, namespace):: + self.withBaseMixin( + function(resource) + resource.spec.writeConnectionSecretToRef.withNamespace(namespace), + ) + + self.withPatchesMixin([ + this.util.patch.fromCompositeFieldPath( + 'metadata.uid', + 'spec.writeConnectionSecretToRef.name' + ) + { + transforms: [{ + type: 'string', + string: { + fmt: '%s-' + suffix, + type: 'Format', + }, + }], + }, + ]), + + '#withDeleteProtectionPatch':: d.fn(help=||| + withDeleteProtectionPatch provides a protection mechanism against unwanted removal + of resources; it defaults to keeping resources around ('Orphan').
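+
+        As an illustrative sketch (the resource name and import path are only examples,
+        reusing the library shown in `new` above), this is typically chained onto a
+        composition resource entry:
+
+        `util.resource.new('db', (import 'provider-sql/0.4/main.libsonnet').mysql.v1alpha1.database)
+        + util.resource.withDeleteProtectionPatch()`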
+ |||, args=[ + d.arg('default', d.T.string, 'Orphan'), + ]), + withDeleteProtectionPatch(default='Orphan'):: + self.withBaseMixin( + function(instance) + instance.spec.withDeletionPolicy(default) + ) + + self.withPatchesMixin([ + this.util.patch.fromCompositeFieldPath( + 'spec.parameters.deleteProtection', + 'spec.deletionPolicy', + ) + + this.util.patch.transforms.bool( + true_value='Orphan', + false_value='Delete', + ), + ]), + + '#withExternalNamePatch':: d.fn(help=||| + withExternalNamePatch is commonly used by providers to name the upstream + resource or to import existing resources + |||), + withExternalNamePatch():: + self.withPatchesMixin([ + this.util.patch.fromCompositeFieldPath( + 'spec.parameters.externalName', + 'metadata.annotations["crossplane.io/external-name"]', + ), + ]), + }, + + patch: { + '#':: d.pkg(name='patch', url='', help='Create patches for Composition resources.'), + + '#fromCompositeFieldPath':: d.fn(help=||| + This type patches from a field within the XR to a field within the composed + resource. It’s commonly used to expose a composed resource spec field as an XR + spec field. + |||, args=[ + d.arg('from', d.T.string), + d.arg('to', d.T.string), + ]), + fromCompositeFieldPath(from, to): { + type: 'FromCompositeFieldPath', + fromFieldPath: from, + toFieldPath: to, + }, + + '#toCompositeFieldPath':: d.fn(help=||| + The inverse of FromCompositeFieldPath. This type patches from a field within the + composed resource to a field within the XR. It’s commonly used to derive an XR + status field from a composed resource status field. + |||, args=[ + d.arg('from', d.T.string), + d.arg('to', d.T.string), + ]), + toCompositeFieldPath(from, to): { + type: 'ToCompositeFieldPath', + fromFieldPath: from, + toFieldPath: to, + }, + + local combine(type, toFieldPath, fmtString, fromFieldPaths) = { + type: type, + combine: { + variables: [ + { + fromFieldPath: fromFieldPath, + } + for fromFieldPath in fromFieldPaths + ], + strategy: 'string', + string: { + fmt: fmtString, + }, + }, + toFieldPath: toFieldPath, + }, + + + '#combineFromComposite':: d.fn(help=||| + This type patches from a combination of multiple fields within the XR + to a field within the composed resource. + It’s commonly used to expose a composed resource spec field as an XR spec field. + |||, args=[ + d.arg('toFieldPath', d.T.string), + d.arg('fmtString', d.T.string), + d.arg('fromFieldPaths', d.T.array), + ]), + combineFromComposite(toFieldPath, fmtString, fromFieldPaths):: combine( + 'CombineFromComposite', + toFieldPath, + fmtString, + fromFieldPaths, + ), + + '#combineToComposite':: d.fn(help=||| + The inverse of CombineFromComposite. This type patches from multiple fields + within the composed resource to a a field within the XR. + It’s commonly used to derive an XR status field from a combination of resource fields. + |||, args=[ + d.arg('toFieldPath', d.T.string), + d.arg('fmtString', d.T.string), + d.arg('fromFieldPaths', d.T.array), + ]), + combineToComposite(toFieldPath, fmtString, fromFieldPaths):: combine( + 'CombineToComposite', + toFieldPath, + fmtString, + fromFieldPaths, + ), + + + policy: { + '#fromFieldPath':: d.fn(help=||| + By default Crossplane will skip the patch until all of the variables to be + combined have values. Set the fromFieldPath policy to 'Required' to instead + abort composition and return an error if a variable has no value. 
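+
+          A sketch of attaching this policy to a patch (the field paths are hypothetical):
+
+          `util.patch.fromCompositeFieldPath('spec.parameters.storageGB', 'spec.forProvider.storageGB')
+          + util.patch.policy.fromFieldPath('Required')`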
+ |||, args=[ + d.arg('value', d.T.string), + ]), + fromFieldPath(value): { + policy: { + fromFieldPath: value, + }, + }, + + '#withMergeOptions':: d.fn(help=||| + You can patch entire objects or arrays from one resource to another. By default + the 'to' object or array will be overwritten, not merged. Use the 'mergeOptions' + field to override this behaviour. Note that these fields accidentally leak Go + terminology - 'slice' means 'array'. 'map' means 'map' in YAML or 'object' in + JSON. + |||, args=[ + d.arg('appendSlice', d.T.bool), + d.arg('keepMapValues', d.T.bool), + ]), + withMergeOptions(appendSlice, keepMapValues): { + policy: { + mergeOptions: { + appendSlice: appendSlice, + keepMapValues: keepMapValues, + }, + }, + }, + }, + + transforms: { + local convertTransform(toType) = { + type: 'convert', + convert: { toType: toType }, + }, + + '#convert':: d.fn(help=||| + Convert a field to a different type. + |||, args=[ + d.arg('toType', d.T.string), + ]), + convert(toType): { + transforms+: [convertTransform(toType)], + }, + + '#bool':: d.fn(help=||| + Transform strings to booleans. + Example: `bool(true_value='Orphan', false_value='Delete')` + |||, args=[ + d.arg('true_value', d.T.string), + d.arg('false_value', d.T.string), + ]), + bool(true_value, false_value): { + transforms+: [ + convertTransform('string'), + { + type: 'map', + map: { + 'true': true_value, + 'false': false_value, + }, + }, + ], + }, + + '#map':: d.fn(help=||| + Use a Map to transform keys into values. + |||, args=[ + d.arg('map', d.T.object), + ]), + map(map={}): { + transforms+: [ + { + type: 'map', + map: map, + }, + ], + }, + + '#match':: d.fn(help=||| + Match a value to a list of patterns. + Use the literalPattern or regexpPattern function to create the patterns. + Return the fallbackValue or fallback to the input if no pattern matches. + |||, args=[ + d.arg('patterns', d.T.array), + d.arg('fallbackValue', d.T.string), + d.arg('fallbackTo', d.T.string), + ]), + match(patterns, fallbackValue=null, fallbackTo='Value'): { + assert fallbackTo == 'Input' || (fallbackTo == 'Value' && fallbackValue != null) : + 'fallbackTo must be set to either "Input" or "Value" (with a fallbackValue in that case))', + local patternsArray = if std.isArray(patterns) then patterns else [patterns], + + transforms+: [ + convertTransform('string'), + { + type: 'match', + match: { + patterns: patterns, + fallbackTo: fallbackTo, + } + ( + if fallbackTo == 'Value' then { + fallbackValue: fallbackValue, + } else {} + ), + }, + ], + }, + + '#literalPattern':: d.fn(help=||| + Match a value against a literal, and return the result if the value matches. + To be used with the match transform. + |||, args=[ + d.arg('literal', d.T.string), + d.arg('result', d.T.string), + ]), + literalPattern(literal, result): { + type: 'literal', + literal: literal, + result: result, + }, + + '#regexpPattern':: d.fn(help=||| + Match a value against a regexp, and return the result if the value matches. + To be used with the match transform. + |||, args=[ + d.arg('regexp', d.T.string), + d.arg('result', d.T.string), + ]), + regexpPattern(regexp, result): { + type: 'regexp', + regexp: regexp, + result: result, + }, + + string: { + '#fmt':: d.fn(help=||| + Format a string. The format string is a Go format string. 
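+
+            For illustration (the '%s-db' suffix is hypothetical), fmt is used as an entry
+            in a patch's transforms list:
+
+            `util.patch.fromCompositeFieldPath('metadata.uid', 'spec.writeConnectionSecretToRef.name')
+            + { transforms+: [util.patch.transforms.string.fmt('%s-db')] }`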
+ |||, args=[ + d.arg('fmt', d.T.string), + ]), + fmt(fmt): { + type: 'string', + string: { + type: 'Format', + fmt: fmt, + }, + }, + + local convertTransform(type) = { + type: 'string', + string: { + type: 'Convert', + convert: type, + }, + }, + + // generate a convertXXX for each of the convert types + '#convertToUpper':: d.fn(help=||| + Convert a string to upper case. + |||), + convertToUpper: convertTransform('ToUpper'), + + '#convertToLower':: d.fn(help=||| + Convert a string to lower case. + |||), + convertToLower: convertTransform('ToLower'), + + '#convertToBase64':: d.fn(help=||| + Convert a string to base64. + |||), + convertToBase64: convertTransform('ToBase64'), + + '#convertFromBase64':: d.fn(help=||| + Convert a base64 string to a string. + |||), + convertFromBase64: convertTransform('FromBase64'), + + '#convertToJson':: d.fn(help=||| + Convert a string to JSON. + |||), + + '#convertToSha1':: d.fn(help=||| + Convert a string to a SHA1 hash. + |||), + convertToSha1: convertTransform('ToSha1'), + + '#convertToSha256':: d.fn(help=||| + Convert a string to a SHA256 hash. + |||), + convertToSha256: convertTransform('ToSha256'), + + '#convertToSha512':: d.fn(help=||| + Convert a string to a SHA512 hash. + |||), + convertToSha512: convertTransform('ToSha512'), + + local trimTransform(type, trim) = { + type: 'string', + string: { + type: type, + trim: trim, + }, + }, + + '#trimPrefix':: d.fn(help=||| + Trim a prefix from a string. + |||, args=[ + d.arg('trim', d.T.string), + ]), + trimPrefix(trim): trimTransform('TrimPrefix', trim), + + '#trimSuffix':: d.fn(help=||| + Trim a suffix from a string. + |||, args=[ + d.arg('trim', d.T.string), + ]), + trimSuffix(trim): trimTransform('TrimSuffix', trim), + + '#regexp':: d.fn(help=||| + Match a regexp against a string. The group is optional and if omitted, the whole match is returned. + |||, args=[ + d.arg('match', d.T.string), + d.arg('group', d.T.number), + ]), + regexp(match, group=''): { + type: 'string', + string: { + type: 'Regexp', + regexp: { + match: match, + [if group != '' then 'group']: group, + }, + }, + }, + }, + + local mathTransform(type, attribute, value) = { + type: 'math', + math: { + type: type, + [attribute]: value, + }, + }, + + '#clampMin':: d.fn(help=||| + Clamp a number to a minimum value. + |||, args=[ + d.arg('min', d.T.number), + ]), + clampMin(min): mathTransform('ClampMin', 'clampMin', min), + + '#clampMax':: d.fn(help=||| + Clamp a number to a maximum value. + |||, args=[ + d.arg('max', d.T.number), + ]), + clampMax(max): mathTransform('ClampMax', 'clampMax', max), + }, + }, + + connectionDetail: { + '#':: d.pkg(name='connectionDetail', url='', help='Create connectionDetails for Compositions.'), + + '#fromConnectionSecretKey':: d.fn(help=||| + Derive the XR's connection detail field `name` from the `key` of the composed + resource's connection secret. The argument `name` defaults to the value of `key`. + |||, args=[ + d.arg('key', d.T.string), + d.arg('name', d.T.string, '-same as key-'), + ]), + fromConnectionSecretKey(key, name=''): { + name: if name == '' then key else name, + fromConnectionSecretKey: key, + }, + + '#fromFieldPath':: d.fn(help=||| + Derive the XR's connection detail field `name` from the `key` field path of the + composed resource. 
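+
+        For example (the field path and name are illustrative):
+        `util.connectionDetail.fromFieldPath('status.atProvider.endpoint', 'host')`
+        exposes the composed resource's endpoint as the XR's `host` connection detail.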
+ |||, args=[ + d.arg('key', d.T.string), + d.arg('name', d.T.string), + ]), + fromFieldPath(key, name): { + type: 'FromFieldPath', + name: name, + fromFieldPath: key, + }, + + '#fromValue':: d.fn(help=||| + Always sets the XR's connection detail field `name` to `value`. + |||, args=[ + d.arg('value', d.T.string), + d.arg('name', d.T.string), + ]), + fromValue(value, name): { + type: 'FromFieldPath', + name: name, + fromValue: value, + }, + }, + + version: { + '#':: d.pkg(name='version', url='', help='Create versions for CompositeResourceDefinitions.'), + + '#new':: d.fn(help=||| + Create a new `version` (e.g. v1alpha1, v1beta1 and v1) schema for an XRD. + + `served` specifies that XRs should be served at this version. It can be set to + false to temporarily disable a version, for example to test whether doing so + breaks anything before a version is removed wholesale. + + `referenceable` denotes the version of a type of XR that Compositions may use. + Only one version may be referenceable. + |||, args=[ + d.arg('version', d.T.string), + d.arg('served', d.T.bool, 'true'), + d.arg('referenceable', d.T.bool, 'true'), + ]), + new(version, served=true, referenceable=true): { + name: version, + served: served, + referenceable: referenceable, + schema: { + openAPIV3Schema: { + type: 'object', + properties: { + apiVersion: { + type: 'string', + }, + kind: { + type: 'string', + }, + metadata: { + type: 'object', + }, + spec: { + type: 'object', + // See addParameterProperty helper function below + //properties: { + // parameters: { + // type: 'object', + // }, + //}, + //required: [ + // 'parameters', + //], + }, + }, + }, + }, + }, + + '#withPropertiesMixin':: d.fn(help=||| + Extend Schema with properties. + + Schema is an OpenAPI schema just like the one used by Kubernetes CRDs. It + determines what fields your XR and claim will have. Note that Crossplane will + automatically extend with some additional Crossplane machinery. + |||, args=[ + d.arg('properties', d.T.object), + ]), + withPropertiesMixin(properties): { + schema+: { + openAPIV3Schema+: { + properties+: properties, + }, + }, + }, + + '#addParameterProperty':: d.fn(help=||| + Add properties to the Schema. + + This shortcut enables to quickly extend the 'parameters' property of a version. + + Attributes: + + - `name` of the property + - `type` eg. string, number, array, object + - `description` for documentation + - `required` is this a required property? + - `mixin` can be used to add enums or the type of an array member for validation. 
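+
+      For illustration (the property name and description are hypothetical):
+
+      `util.version.new('v1alpha1')
+      + util.version.addParameterProperty('size', 'string', 'Size of the instance', required=true)`
+
+      adds a required `spec.parameters.size` string field to the version schema.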
+ |||, args=[ + d.arg('name', d.T.string), + d.arg('type', d.T.string, 'string|number|array|object'), + d.arg('description', d.T.string, ''), + d.arg('required', d.T.bool), + d.arg('mixin', d.T.object), + ]), + addParameterProperty(name, type, description='', required=false, mixin={}): + self.withPropertiesMixin({ + spec+: { + properties+: { + parameters+: { + type: 'object', + properties+: { + [name]: { + type: type, + [if description != '' then 'description']: description, + } + mixin, + }, + required+: + if required + then [name] + else [], + }, + }, + }, + }), + }, + }, +} diff --git a/crossplane/1.17/_gen/apiextensions/main.libsonnet b/crossplane/1.17/_gen/apiextensions/main.libsonnet new file mode 100644 index 0000000..01b5789 --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/main.libsonnet @@ -0,0 +1,7 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='apiextensions', url='', help=''), + v1: (import 'v1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), + v1beta1: (import 'v1beta1/main.libsonnet'), +} diff --git a/crossplane/1.17/_gen/apiextensions/v1/compositeResourceDefinition.libsonnet b/crossplane/1.17/_gen/apiextensions/v1/compositeResourceDefinition.libsonnet new file mode 100644 index 0000000..f1e91ae --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/v1/compositeResourceDefinition.libsonnet @@ -0,0 +1,199 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='compositeResourceDefinition', url='', help='"A CompositeResourceDefinition defines the schema for a new custom Kubernetes\\nAPI.\\n\\n\\nRead the Crossplane documentation for\\n[more information about CustomResourceDefinitions](https://docs.crossplane.io/latest/concepts/composite-resource-definitions)."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of CompositeResourceDefinition', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'apiextensions.crossplane.io/v1', + kind: 'CompositeResourceDefinition', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"CompositeResourceDefinitionSpec specifies the desired state of the definition."'), + spec: { + '#claimNames':: d.obj(help="\"ClaimNames specifies the names of an optional composite resource claim.\\nWhen claim names are specified Crossplane will create a namespaced\\n'composite resource claim' CRD that corresponds to the defined composite\\nresource. This composite resource claim acts as a namespaced proxy for\\nthe composite resource; creating, updating, or deleting the claim will\\ncreate, update, or delete a corresponding composite resource. 
You may add\\nclaim names to an existing CompositeResourceDefinition, but they cannot\\nbe changed or removed once they have been set.\""), + claimNames: { + '#withCategories':: d.fn(help="\"categories is a list of grouped resources this custom resource belongs to (e.g. 'all').\\nThis is published in API discovery documents, and used by clients to support invocations like\\n`kubectl get all`.\"", args=[d.arg(name='categories', type=d.T.array)]), + withCategories(categories): { spec+: { claimNames+: { categories: if std.isArray(v=categories) then categories else [categories] } } }, + '#withCategoriesMixin':: d.fn(help="\"categories is a list of grouped resources this custom resource belongs to (e.g. 'all').\\nThis is published in API discovery documents, and used by clients to support invocations like\\n`kubectl get all`.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='categories', type=d.T.array)]), + withCategoriesMixin(categories): { spec+: { claimNames+: { categories+: if std.isArray(v=categories) then categories else [categories] } } }, + '#withKind':: d.fn(help='"kind is the serialized kind of the resource. It is normally CamelCase and singular.\\nCustom resource instances will use this value as the `kind` attribute in API calls."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { claimNames+: { kind: kind } } }, + '#withListKind':: d.fn(help='"listKind is the serialized kind of the list for this resource. Defaults to \\"`kind`List\\"."', args=[d.arg(name='listKind', type=d.T.string)]), + withListKind(listKind): { spec+: { claimNames+: { listKind: listKind } } }, + '#withPlural':: d.fn(help='"plural is the plural name of the resource to serve.\\nThe custom resources are served under `/apis///.../`.\\nMust match the name of the CustomResourceDefinition (in the form `.`).\\nMust be all lowercase."', args=[d.arg(name='plural', type=d.T.string)]), + withPlural(plural): { spec+: { claimNames+: { plural: plural } } }, + '#withShortNames':: d.fn(help='"shortNames are short names for the resource, exposed in API discovery documents,\\nand used by clients to support invocations like `kubectl get `.\\nIt must be all lowercase."', args=[d.arg(name='shortNames', type=d.T.array)]), + withShortNames(shortNames): { spec+: { claimNames+: { shortNames: if std.isArray(v=shortNames) then shortNames else [shortNames] } } }, + '#withShortNamesMixin':: d.fn(help='"shortNames are short names for the resource, exposed in API discovery documents,\\nand used by clients to support invocations like `kubectl get `.\\nIt must be all lowercase."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='shortNames', type=d.T.array)]), + withShortNamesMixin(shortNames): { spec+: { claimNames+: { shortNames+: if std.isArray(v=shortNames) then shortNames else [shortNames] } } }, + '#withSingular':: d.fn(help='"singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`."', args=[d.arg(name='singular', type=d.T.string)]), + withSingular(singular): { spec+: { claimNames+: { singular: singular } } }, + }, + '#conversion':: d.obj(help='"Conversion defines all conversion settings for the defined Composite resource."'), + conversion: { + '#webhook':: d.obj(help='"webhook describes how to call the conversion webhook. 
Required when `strategy` is set to `\\"Webhook\\"`."'), + webhook: { + '#clientConfig':: d.obj(help='"clientConfig is the instructions for how to call the webhook if strategy is `Webhook`."'), + clientConfig: { + '#service':: d.obj(help='"service is a reference to the service for this webhook. Either\\nservice or url must be specified.\\n\\n\\nIf the webhook is running within the cluster, then you should use `service`."'), + service: { + '#withName':: d.fn(help='"name is the name of the service.\\nRequired"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { conversion+: { webhook+: { clientConfig+: { service+: { name: name } } } } } }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the service.\\nRequired"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { conversion+: { webhook+: { clientConfig+: { service+: { namespace: namespace } } } } } }, + '#withPath':: d.fn(help='"path is an optional URL path at which the webhook will be contacted."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { spec+: { conversion+: { webhook+: { clientConfig+: { service+: { path: path } } } } } }, + '#withPort':: d.fn(help='"port is an optional service port at which the webhook will be contacted.\\n`port` should be a valid port number (1-65535, inclusive).\\nDefaults to 443 for backward compatibility."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { spec+: { conversion+: { webhook+: { clientConfig+: { service+: { port: port } } } } } }, + }, + '#withCaBundle':: d.fn(help="\"caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.\\nIf unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), + withCaBundle(caBundle): { spec+: { conversion+: { webhook+: { clientConfig+: { caBundle: caBundle } } } } }, + '#withUrl':: d.fn(help='"url gives the location of the webhook, in standard URL form\\n(`scheme://host:port/path`). Exactly one of `url` or `service`\\nmust be specified.\\n\\n\\nThe `host` should not refer to a service running in the cluster; use\\nthe `service` field instead. The host might be resolved via external\\nDNS in some apiservers (e.g., `kube-apiserver` cannot resolve\\nin-cluster DNS as that would be a layering violation). `host` may\\nalso be an IP address.\\n\\n\\nPlease note that using `localhost` or `127.0.0.1` as a `host` is\\nrisky unless you take great care to run this webhook on all hosts\\nwhich run an apiserver which might need to make calls to this\\nwebhook. Such installs are likely to be non-portable, i.e., not easy\\nto turn up in a new cluster.\\n\\n\\nThe scheme must be \\"https\\"; the URL must begin with \\"https://\\".\\n\\n\\nA path is optional, and if present may be any string permissible in\\na URL. You may use the path to pass an arbitrary string to the\\nwebhook, for example, a cluster identifier.\\n\\n\\nAttempting to use a user or basic auth e.g. \\"user:password@\\" is not\\nallowed. Fragments (\\"#...\\") and query parameters (\\"?...\\") are not\\nallowed, either."', args=[d.arg(name='url', type=d.T.string)]), + withUrl(url): { spec+: { conversion+: { webhook+: { clientConfig+: { url: url } } } } }, + }, + '#withConversionReviewVersions':: d.fn(help='"conversionReviewVersions is an ordered list of preferred `ConversionReview`\\nversions the Webhook expects. The API server will use the first version in\\nthe list which it supports. 
If none of the versions specified in this list\\nare supported by API server, conversion will fail for the custom resource.\\nIf a persisted Webhook configuration specifies allowed versions and does not\\ninclude any versions known to the API Server, calls to the webhook will fail."', args=[d.arg(name='conversionReviewVersions', type=d.T.array)]), + withConversionReviewVersions(conversionReviewVersions): { spec+: { conversion+: { webhook+: { conversionReviewVersions: if std.isArray(v=conversionReviewVersions) then conversionReviewVersions else [conversionReviewVersions] } } } }, + '#withConversionReviewVersionsMixin':: d.fn(help='"conversionReviewVersions is an ordered list of preferred `ConversionReview`\\nversions the Webhook expects. The API server will use the first version in\\nthe list which it supports. If none of the versions specified in this list\\nare supported by API server, conversion will fail for the custom resource.\\nIf a persisted Webhook configuration specifies allowed versions and does not\\ninclude any versions known to the API Server, calls to the webhook will fail."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conversionReviewVersions', type=d.T.array)]), + withConversionReviewVersionsMixin(conversionReviewVersions): { spec+: { conversion+: { webhook+: { conversionReviewVersions+: if std.isArray(v=conversionReviewVersions) then conversionReviewVersions else [conversionReviewVersions] } } } }, + }, + '#withStrategy':: d.fn(help='"strategy specifies how custom resources are converted between versions. Allowed values are:\\n- `\\"None\\"`: The converter only change the apiVersion and would not touch any other field in the custom resource.\\n- `\\"Webhook\\"`: API Server will call to an external webhook to do the conversion. Additional information\\n is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { spec+: { conversion+: { strategy: strategy } } }, + }, + '#defaultCompositionRef':: d.obj(help='"DefaultCompositionRef refers to the Composition resource that will be used\\nin case no composition selector is given."'), + defaultCompositionRef: { + '#withName':: d.fn(help='"Name of the Composition."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { defaultCompositionRef+: { name: name } } }, + }, + '#enforcedCompositionRef':: d.obj(help='"EnforcedCompositionRef refers to the Composition resource that will be used\\nby all composite instances whose schema is defined by this definition."'), + enforcedCompositionRef: { + '#withName':: d.fn(help='"Name of the Composition."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { enforcedCompositionRef+: { name: name } } }, + }, + '#metadata':: d.obj(help="\"Metadata specifies the desired metadata for the defined composite resource and claim CRD's.\""), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be\\nset by external tools to store and retrieve arbitrary metadata. 
They are not\\nqueryable and should be preserved when modifying objects.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { spec+: { metadata+: { annotations: annotations } } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be\\nset by external tools to store and retrieve arbitrary metadata. They are not\\nqueryable and should be preserved when modifying objects.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { spec+: { metadata+: { annotations+: annotations } } }, + '#withLabels':: d.fn(help="\"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels\\nand services.\\nThese labels are added to the composite resource and claim CRD's in addition\\nto any labels defined by `CompositionResourceDefinition` `metadata.labels`.\"", args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { spec+: { metadata+: { labels: labels } } }, + '#withLabelsMixin':: d.fn(help="\"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels\\nand services.\\nThese labels are added to the composite resource and claim CRD's in addition\\nto any labels defined by `CompositionResourceDefinition` `metadata.labels`.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { spec+: { metadata+: { labels+: labels } } }, + }, + '#names':: d.obj(help='"Names specifies the resource and kind names of the defined composite\\nresource."'), + names: { + '#withCategories':: d.fn(help="\"categories is a list of grouped resources this custom resource belongs to (e.g. 'all').\\nThis is published in API discovery documents, and used by clients to support invocations like\\n`kubectl get all`.\"", args=[d.arg(name='categories', type=d.T.array)]), + withCategories(categories): { spec+: { names+: { categories: if std.isArray(v=categories) then categories else [categories] } } }, + '#withCategoriesMixin':: d.fn(help="\"categories is a list of grouped resources this custom resource belongs to (e.g. 'all').\\nThis is published in API discovery documents, and used by clients to support invocations like\\n`kubectl get all`.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='categories', type=d.T.array)]), + withCategoriesMixin(categories): { spec+: { names+: { categories+: if std.isArray(v=categories) then categories else [categories] } } }, + '#withKind':: d.fn(help='"kind is the serialized kind of the resource. It is normally CamelCase and singular.\\nCustom resource instances will use this value as the `kind` attribute in API calls."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { names+: { kind: kind } } }, + '#withListKind':: d.fn(help='"listKind is the serialized kind of the list for this resource. 
Defaults to \\"`kind`List\\"."', args=[d.arg(name='listKind', type=d.T.string)]), + withListKind(listKind): { spec+: { names+: { listKind: listKind } } }, + '#withPlural':: d.fn(help='"plural is the plural name of the resource to serve.\\nThe custom resources are served under `/apis///.../`.\\nMust match the name of the CustomResourceDefinition (in the form `.`).\\nMust be all lowercase."', args=[d.arg(name='plural', type=d.T.string)]), + withPlural(plural): { spec+: { names+: { plural: plural } } }, + '#withShortNames':: d.fn(help='"shortNames are short names for the resource, exposed in API discovery documents,\\nand used by clients to support invocations like `kubectl get `.\\nIt must be all lowercase."', args=[d.arg(name='shortNames', type=d.T.array)]), + withShortNames(shortNames): { spec+: { names+: { shortNames: if std.isArray(v=shortNames) then shortNames else [shortNames] } } }, + '#withShortNamesMixin':: d.fn(help='"shortNames are short names for the resource, exposed in API discovery documents,\\nand used by clients to support invocations like `kubectl get `.\\nIt must be all lowercase."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='shortNames', type=d.T.array)]), + withShortNamesMixin(shortNames): { spec+: { names+: { shortNames+: if std.isArray(v=shortNames) then shortNames else [shortNames] } } }, + '#withSingular':: d.fn(help='"singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`."', args=[d.arg(name='singular', type=d.T.string)]), + withSingular(singular): { spec+: { names+: { singular: singular } } }, + }, + '#versions':: d.obj(help='"Versions is the list of all API versions of the defined composite\\nresource. Version names are used to compute the order in which served\\nversions are listed in API discovery. If the version string is\\n\\"kube-like\\", it will sort above non \\"kube-like\\" version strings, which\\nare ordered lexicographically. \\"Kube-like\\" versions start with a \\"v\\",\\nthen are followed by a number (the major version), then optionally the\\nstring \\"alpha\\" or \\"beta\\" and another number (the minor version). These\\nare sorted first by GA > beta > alpha (where GA is a version with no\\nsuffix such as beta or alpha), and then by comparing major version, then\\nminor version. An example sorted list of versions: v10, v2, v1, v11beta2,\\nv10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10."'), + versions: { + '#additionalPrinterColumns':: d.obj(help='"AdditionalPrinterColumns specifies additional columns returned in Table\\noutput. If no columns are specified, a single column displaying the age\\nof the custom resource is used. See the following link for details:\\nhttps://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables"'), + additionalPrinterColumns: { + '#withDescription':: d.fn(help='"description is a human readable description of this column."', args=[d.arg(name='description', type=d.T.string)]), + withDescription(description): { description: description }, + '#withFormat':: d.fn(help="\"format is an optional OpenAPI type definition for this column. 
The 'name' format is applied\\nto the primary identifier column to assist in clients identifying column is the resource name.\\nSee https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.\"", args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { format: format }, + '#withJsonPath':: d.fn(help='"jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against\\neach custom resource to produce the value for this column."', args=[d.arg(name='jsonPath', type=d.T.string)]), + withJsonPath(jsonPath): { jsonPath: jsonPath }, + '#withName':: d.fn(help='"name is a human readable name for the column."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPriority':: d.fn(help='"priority is an integer defining the relative importance of this column compared to others. Lower\\nnumbers are considered higher priority. Columns that may be omitted in limited space scenarios\\nshould be given a priority greater than 0."', args=[d.arg(name='priority', type=d.T.integer)]), + withPriority(priority): { priority: priority }, + '#withType':: d.fn(help='"type is an OpenAPI type definition for this column.\\nSee https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#schema':: d.obj(help='"Schema describes the schema used for validation, pruning, and defaulting\\nof this version of the defined composite resource. Fields required by all\\ncomposite resources will be injected into this schema automatically, and\\nwill override equivalently named fields in this schema. Omitting this\\nschema results in a schema that contains only the fields required by all\\ncomposite resources."'), + schema: { + '#withOpenAPIV3Schema':: d.fn(help='"OpenAPIV3Schema is the OpenAPI v3 schema to use for validation and\\npruning."', args=[d.arg(name='openAPIV3Schema', type=d.T.object)]), + withOpenAPIV3Schema(openAPIV3Schema): { schema+: { openAPIV3Schema: openAPIV3Schema } }, + '#withOpenAPIV3SchemaMixin':: d.fn(help='"OpenAPIV3Schema is the OpenAPI v3 schema to use for validation and\\npruning."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='openAPIV3Schema', type=d.T.object)]), + withOpenAPIV3SchemaMixin(openAPIV3Schema): { schema+: { openAPIV3Schema+: openAPIV3Schema } }, + }, + '#withAdditionalPrinterColumns':: d.fn(help='"AdditionalPrinterColumns specifies additional columns returned in Table\\noutput. If no columns are specified, a single column displaying the age\\nof the custom resource is used. See the following link for details:\\nhttps://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables"', args=[d.arg(name='additionalPrinterColumns', type=d.T.array)]), + withAdditionalPrinterColumns(additionalPrinterColumns): { additionalPrinterColumns: if std.isArray(v=additionalPrinterColumns) then additionalPrinterColumns else [additionalPrinterColumns] }, + '#withAdditionalPrinterColumnsMixin':: d.fn(help='"AdditionalPrinterColumns specifies additional columns returned in Table\\noutput. If no columns are specified, a single column displaying the age\\nof the custom resource is used. 
See the following link for details:\\nhttps://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='additionalPrinterColumns', type=d.T.array)]), + withAdditionalPrinterColumnsMixin(additionalPrinterColumns): { additionalPrinterColumns+: if std.isArray(v=additionalPrinterColumns) then additionalPrinterColumns else [additionalPrinterColumns] }, + '#withDeprecated':: d.fn(help='"The deprecated field specifies that this version is deprecated and should\\nnot be used."', args=[d.arg(name='deprecated', type=d.T.boolean)]), + withDeprecated(deprecated): { deprecated: deprecated }, + '#withDeprecationWarning':: d.fn(help='"DeprecationWarning specifies the message that should be shown to the user\\nwhen using this version."', args=[d.arg(name='deprecationWarning', type=d.T.string)]), + withDeprecationWarning(deprecationWarning): { deprecationWarning: deprecationWarning }, + '#withName':: d.fn(help='"Name of this version, e.g. “v1”, “v2beta1”, etc. Composite resources are\\nserved under this version at `/apis///...` if `served` is\\ntrue."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withReferenceable':: d.fn(help="\"Referenceable specifies that this version may be referenced by a\\nComposition in order to configure which resources an XR may be composed\\nof. Exactly one version must be marked as referenceable; all Compositions\\nmust target only the referenceable version. The referenceable version\\nmust be served. It's mapped to the CRD's `spec.versions[*].storage` field.\"", args=[d.arg(name='referenceable', type=d.T.boolean)]), + withReferenceable(referenceable): { referenceable: referenceable }, + '#withServed':: d.fn(help='"Served specifies that this version should be served via REST APIs."', args=[d.arg(name='served', type=d.T.boolean)]), + withServed(served): { served: served }, + }, + '#withConnectionSecretKeys':: d.fn(help='"ConnectionSecretKeys is the list of keys that will be exposed to the end\\nuser of the defined kind.\\nIf the list is empty, all keys will be published."', args=[d.arg(name='connectionSecretKeys', type=d.T.array)]), + withConnectionSecretKeys(connectionSecretKeys): { spec+: { connectionSecretKeys: if std.isArray(v=connectionSecretKeys) then connectionSecretKeys else [connectionSecretKeys] } }, + '#withConnectionSecretKeysMixin':: d.fn(help='"ConnectionSecretKeys is the list of keys that will be exposed to the end\\nuser of the defined kind.\\nIf the list is empty, all keys will be published."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='connectionSecretKeys', type=d.T.array)]), + withConnectionSecretKeysMixin(connectionSecretKeys): { spec+: { connectionSecretKeys+: if std.isArray(v=connectionSecretKeys) then connectionSecretKeys else [connectionSecretKeys] } }, + '#withDefaultCompositeDeletePolicy':: d.fn(help='"DefaultCompositeDeletePolicy is the policy used when deleting the Composite\\nthat is associated with the Claim if no policy has been specified."', args=[d.arg(name='defaultCompositeDeletePolicy', type=d.T.string)]), + withDefaultCompositeDeletePolicy(defaultCompositeDeletePolicy): { spec+: { defaultCompositeDeletePolicy: defaultCompositeDeletePolicy } }, + '#withDefaultCompositionUpdatePolicy':: d.fn(help='"DefaultCompositionUpdatePolicy is the policy used when updating composites after a new\\nComposition Revision has been created if no policy has been specified on 
the composite."', args=[d.arg(name='defaultCompositionUpdatePolicy', type=d.T.string)]), + withDefaultCompositionUpdatePolicy(defaultCompositionUpdatePolicy): { spec+: { defaultCompositionUpdatePolicy: defaultCompositionUpdatePolicy } }, + '#withGroup':: d.fn(help='"Group specifies the API group of the defined composite resource.\\nComposite resources are served under `/apis//...`. Must match the\\nname of the XRD (in the form `.`)."', args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { spec+: { group: group } }, + '#withVersions':: d.fn(help='"Versions is the list of all API versions of the defined composite\\nresource. Version names are used to compute the order in which served\\nversions are listed in API discovery. If the version string is\\n\\"kube-like\\", it will sort above non \\"kube-like\\" version strings, which\\nare ordered lexicographically. \\"Kube-like\\" versions start with a \\"v\\",\\nthen are followed by a number (the major version), then optionally the\\nstring \\"alpha\\" or \\"beta\\" and another number (the minor version). These\\nare sorted first by GA > beta > alpha (where GA is a version with no\\nsuffix such as beta or alpha), and then by comparing major version, then\\nminor version. An example sorted list of versions: v10, v2, v1, v11beta2,\\nv10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10."', args=[d.arg(name='versions', type=d.T.array)]), + withVersions(versions): { spec+: { versions: if std.isArray(v=versions) then versions else [versions] } }, + '#withVersionsMixin':: d.fn(help='"Versions is the list of all API versions of the defined composite\\nresource. Version names are used to compute the order in which served\\nversions are listed in API discovery. If the version string is\\n\\"kube-like\\", it will sort above non \\"kube-like\\" version strings, which\\nare ordered lexicographically. \\"Kube-like\\" versions start with a \\"v\\",\\nthen are followed by a number (the major version), then optionally the\\nstring \\"alpha\\" or \\"beta\\" and another number (the minor version). These\\nare sorted first by GA > beta > alpha (where GA is a version with no\\nsuffix such as beta or alpha), and then by comparing major version, then\\nminor version. 
An example sorted list of versions: v10, v2, v1, v11beta2,\\nv10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='versions', type=d.T.array)]), + withVersionsMixin(versions): { spec+: { versions+: if std.isArray(v=versions) then versions else [versions] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/apiextensions/v1/composition.libsonnet b/crossplane/1.17/_gen/apiextensions/v1/composition.libsonnet new file mode 100644 index 0000000..fb5f110 --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/v1/composition.libsonnet @@ -0,0 +1,588 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='composition', url='', help='"A Composition defines a collection of managed resources or functions that\\nCrossplane uses to create and manage new composite resources.\\n\\n\\nRead the Crossplane documentation for\\n[more information about Compositions](https://docs.crossplane.io/latest/concepts/compositions)."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Composition', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'apiextensions.crossplane.io/v1', + kind: 'Composition', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"CompositionSpec specifies desired state of a composition."'), + spec: { + '#compositeTypeRef':: d.obj(help='"CompositeTypeRef specifies the type of composite resource that this\\ncomposition is compatible with."'), + compositeTypeRef: { + '#withApiVersion':: d.fn(help='"APIVersion of the type."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { compositeTypeRef+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the type."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { compositeTypeRef+: { kind: kind } } }, + }, + '#environment':: d.obj(help='"Environment configures the environment in which resources are rendered.\\n\\n\\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\\nunless the relevant Crossplane feature flag is enabled, and may be\\nchanged or removed without notice."'), + environment: { + '#environmentConfigs':: d.obj(help='"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\\nresources are stored in the composite resource at\\n`spec.environmentConfigRefs` and is only updated if it is null.\\n\\n\\nThe list of references is used to compute an in-memory environment at\\ncompose time. 
The data of all object is merged in the order they are\\nlisted, meaning the values of EnvironmentConfigs with a larger index take\\npriority over ones with smaller indices.\\n\\n\\nThe computed environment can be accessed in a composition using\\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches."'), + environmentConfigs: { + '#ref':: d.obj(help='"Ref is a named reference to a single EnvironmentConfig.\\nEither Ref or Selector is required."'), + ref: { + '#withName':: d.fn(help='"The name of the object."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ref+: { name: name } }, + }, + '#selector':: d.obj(help='"Selector selects EnvironmentConfig(s) via labels."'), + selector: { + '#matchLabels':: d.obj(help='"MatchLabels ensures an object with matching labels is selected."'), + matchLabels: { + '#withFromFieldPathPolicy':: d.fn(help='"FromFieldPathPolicy specifies the policy for the valueFromFieldPath.\\nThe default is Required, meaning that an error will be returned if the\\nfield is not found in the composite resource.\\nOptional means that if the field is not found in the composite resource,\\nthat label pair will just be skipped. N.B. other specified label\\nmatchers will still be used to retrieve the desired\\nenvironment config, if any."', args=[d.arg(name='fromFieldPathPolicy', type=d.T.string)]), + withFromFieldPathPolicy(fromFieldPathPolicy): { fromFieldPathPolicy: fromFieldPathPolicy }, + '#withKey':: d.fn(help='"Key of the label to match."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withType':: d.fn(help='"Type specifies where the value for a label comes from."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withValue':: d.fn(help='"Value specifies a literal label value."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + '#withValueFromFieldPath':: d.fn(help='"ValueFromFieldPath specifies the field path to look for the label value."', args=[d.arg(name='valueFromFieldPath', type=d.T.string)]), + withValueFromFieldPath(valueFromFieldPath): { valueFromFieldPath: valueFromFieldPath }, + }, + '#withMatchLabels':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."', args=[d.arg(name='matchLabels', type=d.T.array)]), + withMatchLabels(matchLabels): { selector+: { matchLabels: if std.isArray(v=matchLabels) then matchLabels else [matchLabels] } }, + '#withMatchLabelsMixin':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.array)]), + withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: if std.isArray(v=matchLabels) then matchLabels else [matchLabels] } }, + '#withMaxMatch':: d.fn(help='"MaxMatch specifies the number of extracted EnvironmentConfigs in Multiple mode, extracts all if nil."', args=[d.arg(name='maxMatch', type=d.T.integer)]), + withMaxMatch(maxMatch): { selector+: { maxMatch: maxMatch } }, + '#withMinMatch':: d.fn(help='"MinMatch specifies the required minimum of extracted EnvironmentConfigs in Multiple mode."', args=[d.arg(name='minMatch', type=d.T.integer)]), + withMinMatch(minMatch): { selector+: { minMatch: minMatch } }, + '#withMode':: d.fn(help='"Mode specifies retrieval strategy: \\"Single\\" or \\"Multiple\\"."', args=[d.arg(name='mode', type=d.T.string)]), + withMode(mode): { selector+: { mode: mode } }, + '#withSortByFieldPath':: 
d.fn(help='"SortByFieldPath is the path to the field based on which list of EnvironmentConfigs is alphabetically sorted."', args=[d.arg(name='sortByFieldPath', type=d.T.string)]), + withSortByFieldPath(sortByFieldPath): { selector+: { sortByFieldPath: sortByFieldPath } }, + }, + '#withType':: d.fn(help='"Type specifies the way the EnvironmentConfig is selected.\\nDefault is `Reference`"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#patches':: d.obj(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed.\""), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite or\\nCombineToComposite patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#mergeOptions':: d.obj(help='"MergeOptions Specifies merge options on a field path."'), + mergeOptions: { + '#withAppendSlice':: d.fn(help='"Specifies that already existing elements in a merged slice should be preserved"', args=[d.arg(name='appendSlice', type=d.T.boolean)]), + withAppendSlice(appendSlice): { policy+: { mergeOptions+: { appendSlice: appendSlice } } }, + '#withKeepMapValues':: d.fn(help='"Specifies that already existing values in a merged map should be preserved"', args=[d.arg(name='keepMapValues', type=d.T.boolean)]), + withKeepMapValues(keepMapValues): { policy+: { mergeOptions+: { keepMapValues: keepMapValues } } }, + }, + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\\nthe specified path does not exist.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. 
The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join defines parameters to join a slice of values to a string."'), + join: { + '#withSeparator':: d.fn(help='"Separator defines the character that should separate the values from each\\nother in the joined string."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON.\\n`ToAdler32` generate a addler32 hash based on the input string."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath or\\nToCompositeFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#policy':: d.obj(help='"Policy represents the Resolve and Resolution policies which apply to\\nall EnvironmentSourceReferences in EnvironmentConfigs list."'), + policy: { + '#withResolution':: d.fn(help="\"Resolution specifies whether resolution of this reference is required.\\nThe default is 'Required', which means the reconcile will fail if the\\nreference cannot be resolved. 'Optional' means this reference will be\\na no-op if it cannot be resolved.\"", args=[d.arg(name='resolution', type=d.T.string)]), + withResolution(resolution): { spec+: { environment+: { policy+: { resolution: resolution } } } }, + '#withResolve':: d.fn(help="\"Resolve specifies when this reference should be resolved. The default\\nis 'IfNotPresent', which will attempt to resolve the reference only when\\nthe corresponding field is not present. 
Use 'Always' to resolve the\\nreference on every reconcile.\"", args=[d.arg(name='resolve', type=d.T.string)]), + withResolve(resolve): { spec+: { environment+: { policy+: { resolve: resolve } } } }, + }, + '#withDefaultData':: d.fn(help='"DefaultData statically defines the initial state of the environment.\\nIt has the same schema-less structure as the data field in\\nenvironment configs.\\nIt is overwritten by the selected environment configs."', args=[d.arg(name='defaultData', type=d.T.object)]), + withDefaultData(defaultData): { spec+: { environment+: { defaultData: defaultData } } }, + '#withDefaultDataMixin':: d.fn(help='"DefaultData statically defines the initial state of the environment.\\nIt has the same schema-less structure as the data field in\\nenvironment configs.\\nIt is overwritten by the selected environment configs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='defaultData', type=d.T.object)]), + withDefaultDataMixin(defaultData): { spec+: { environment+: { defaultData+: defaultData } } }, + '#withEnvironmentConfigs':: d.fn(help='"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\\nresources are stored in the composite resource at\\n`spec.environmentConfigRefs` and is only updated if it is null.\\n\\n\\nThe list of references is used to compute an in-memory environment at\\ncompose time. The data of all object is merged in the order they are\\nlisted, meaning the values of EnvironmentConfigs with a larger index take\\npriority over ones with smaller indices.\\n\\n\\nThe computed environment can be accessed in a composition using\\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches."', args=[d.arg(name='environmentConfigs', type=d.T.array)]), + withEnvironmentConfigs(environmentConfigs): { spec+: { environment+: { environmentConfigs: if std.isArray(v=environmentConfigs) then environmentConfigs else [environmentConfigs] } } }, + '#withEnvironmentConfigsMixin':: d.fn(help='"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\\nresources are stored in the composite resource at\\n`spec.environmentConfigRefs` and is only updated if it is null.\\n\\n\\nThe list of references is used to compute an in-memory environment at\\ncompose time. 
The data of all object is merged in the order they are\\nlisted, meaning the values of EnvironmentConfigs with a larger index take\\npriority over ones with smaller indices.\\n\\n\\nThe computed environment can be accessed in a composition using\\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='environmentConfigs', type=d.T.array)]), + withEnvironmentConfigsMixin(environmentConfigs): { spec+: { environment+: { environmentConfigs+: if std.isArray(v=environmentConfigs) then environmentConfigs else [environmentConfigs] } } }, + '#withPatches':: d.fn(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed.\"", args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { spec+: { environment+: { patches: if std.isArray(v=patches) then patches else [patches] } } }, + '#withPatchesMixin':: d.fn(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { spec+: { environment+: { patches+: if std.isArray(v=patches) then patches else [patches] } } }, + }, + '#patchSets':: d.obj(help='"PatchSets define a named set of patches that may be included by any\\nresource in this Composition. PatchSets cannot themselves refer to other\\nPatchSets.\\n\\n\\nPatchSets are only used by the \\"Resources\\" mode of Composition. They\\nare ignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."'), + patchSets: { + '#patches':: d.obj(help='"Patches will be applied as an overlay to the base resource."'), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite,\\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#mergeOptions':: d.obj(help='"MergeOptions Specifies merge options on a field path."'), + mergeOptions: { + '#withAppendSlice':: d.fn(help='"Specifies that already existing elements in a merged slice should be preserved"', args=[d.arg(name='appendSlice', type=d.T.boolean)]), + withAppendSlice(appendSlice): { policy+: { mergeOptions+: { appendSlice: appendSlice } } }, + '#withKeepMapValues':: d.fn(help='"Specifies that already existing values in a merged map should be preserved"', args=[d.arg(name='keepMapValues', type=d.T.boolean)]), + withKeepMapValues(keepMapValues): { policy+: { mergeOptions+: { keepMapValues: keepMapValues } } }, + }, + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\\nthe specified path does not exist.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. 
The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join defines parameters to join a slice of values to a string."'), + join: { + '#withSeparator':: d.fn(help='"Separator defines the character that should separate the values from each\\nother in the joined string."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON.\\n`ToAdler32` generate a addler32 hash based on the input string."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath,\\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withPatchSetName':: d.fn(help='"PatchSetName to include patches from. Required when type is PatchSet."', args=[d.arg(name='patchSetName', type=d.T.string)]), + withPatchSetName(patchSetName): { patchSetName: patchSetName }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. 
Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withName':: d.fn(help='"Name of this PatchSet."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPatches':: d.fn(help='"Patches will be applied as an overlay to the base resource."', args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { patches: if std.isArray(v=patches) then patches else [patches] }, + '#withPatchesMixin':: d.fn(help='"Patches will be applied as an overlay to the base resource."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { patches+: if std.isArray(v=patches) then patches else [patches] }, + }, + '#pipeline':: d.obj(help='"Pipeline is a list of composition function steps that will be used when a\\ncomposite resource referring to this composition is created. One of\\nresources and pipeline must be specified - you cannot specify both.\\n\\n\\nThe Pipeline is only used by the \\"Pipeline\\" mode of Composition. It is\\nignored by other modes."'), + pipeline: { + '#credentials':: d.obj(help='"Credentials are optional credentials that the Composition Function needs."'), + credentials: { + '#secretRef':: d.obj(help='"A SecretRef is a reference to a secret containing credentials that should\\nbe supplied to the function."'), + secretRef: { + '#withName':: d.fn(help='"Name of the secret."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace of the secret."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withName':: d.fn(help='"Name of this set of credentials."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withSource':: d.fn(help='"Source of the function credentials."', args=[d.arg(name='source', type=d.T.string)]), + withSource(source): { source: source }, + }, + '#functionRef':: d.obj(help='"FunctionRef is a reference to the Composition Function this step should\\nexecute."'), + functionRef: { + '#withName':: d.fn(help='"Name of the referenced Function."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { functionRef+: { name: name } }, + }, + '#withCredentials':: d.fn(help='"Credentials are optional credentials that the Composition Function needs."', args=[d.arg(name='credentials', type=d.T.array)]), + withCredentials(credentials): { credentials: if std.isArray(v=credentials) then credentials else [credentials] }, + '#withCredentialsMixin':: d.fn(help='"Credentials are optional credentials that the Composition Function needs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='credentials', type=d.T.array)]), + withCredentialsMixin(credentials): { credentials+: if std.isArray(v=credentials) then credentials else [credentials] }, + '#withInput':: d.fn(help="\"Input is an optional, arbitrary Kubernetes resource (i.e. a resource\\nwith an apiVersion and kind) that will be passed to the Composition\\nFunction as the 'input' of its RunFunctionRequest.\"", args=[d.arg(name='input', type=d.T.object)]), + withInput(input): { input: input }, + '#withInputMixin':: d.fn(help="\"Input is an optional, arbitrary Kubernetes resource (i.e. 
a resource\\nwith an apiVersion and kind) that will be passed to the Composition\\nFunction as the 'input' of its RunFunctionRequest.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='input', type=d.T.object)]), + withInputMixin(input): { input+: input }, + '#withStep':: d.fn(help='"Step name. Must be unique within its Pipeline."', args=[d.arg(name='step', type=d.T.string)]), + withStep(step): { step: step }, + }, + '#publishConnectionDetailsWithStoreConfigRef':: d.obj(help='"PublishConnectionDetailsWithStoreConfig specifies the secret store config\\nwith which the connection details of composite resources dynamically\\nprovisioned using this composition will be published.\\n\\n\\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\\nunless the relevant Crossplane feature flag is enabled, and may be\\nchanged or removed without notice."'), + publishConnectionDetailsWithStoreConfigRef: { + '#withName':: d.fn(help='"Name of the referenced StoreConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { publishConnectionDetailsWithStoreConfigRef+: { name: name } } }, + }, + '#resources':: d.obj(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource referring to this composition is created.\\n\\n\\nResources are only used by the \\"Resources\\" mode of Composition. They are\\nignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."'), + resources: { + '#connectionDetails':: d.obj(help='"ConnectionDetails lists the propagation secret keys from this target\\nresource to the composition instance connection secret."'), + connectionDetails: { + '#withFromConnectionSecretKey':: d.fn(help="\"FromConnectionSecretKey is the key that will be used to fetch the value\\nfrom the composed resource's connection secret.\"", args=[d.arg(name='fromConnectionSecretKey', type=d.T.string)]), + withFromConnectionSecretKey(fromConnectionSecretKey): { fromConnectionSecretKey: fromConnectionSecretKey }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the composed resource whose\\nvalue to be used as input. Name must be specified if the type is\\nFromFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withName':: d.fn(help="\"Name of the connection secret key that will be propagated to the\\nconnection secret of the composition instance. Leave empty if you'd like\\nto use the same key name.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withType':: d.fn(help='"Type sets the connection detail fetching behaviour to be used. Each\\nconnection detail type may require its own fields to be set on the\\nConnectionDetail object. If the type is omitted Crossplane will attempt\\nto infer it based on which other fields were specified. If multiple\\nfields are specified the order of precedence is:\\n1. FromValue\\n2. FromConnectionSecretKey\\n3. FromFieldPath"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withValue':: d.fn(help='"Value that will be propagated to the connection secret of the composite\\nresource. 
May be set to inject a fixed, non-sensitive connection secret\\nvalue, for example a well-known port."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#patches':: d.obj(help='"Patches will be applied as overlay to the base resource."'), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite,\\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#mergeOptions':: d.obj(help='"MergeOptions Specifies merge options on a field path."'), + mergeOptions: { + '#withAppendSlice':: d.fn(help='"Specifies that already existing elements in a merged slice should be preserved"', args=[d.arg(name='appendSlice', type=d.T.boolean)]), + withAppendSlice(appendSlice): { policy+: { mergeOptions+: { appendSlice: appendSlice } } }, + '#withKeepMapValues':: d.fn(help='"Specifies that already existing values in a merged map should be preserved"', args=[d.arg(name='keepMapValues', type=d.T.boolean)]), + withKeepMapValues(keepMapValues): { policy+: { mergeOptions+: { keepMapValues: keepMapValues } } }, + }, + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\\nthe specified path does not exist.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. 
The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join defines parameters to join a slice of values to a string."'), + join: { + '#withSeparator':: d.fn(help='"Separator defines the character that should separate the values from each\\nother in the joined string."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON.\\n`ToAdler32` generate a addler32 hash based on the input string."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath,\\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withPatchSetName':: d.fn(help='"PatchSetName to include patches from. Required when type is PatchSet."', args=[d.arg(name='patchSetName', type=d.T.string)]), + withPatchSetName(patchSetName): { patchSetName: patchSetName }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#readinessChecks':: d.obj(help='"ReadinessChecks allows users to define custom readiness checks. All checks\\nhave to return true in order for resource to be considered ready. 
The\\ndefault readiness check is to have the \\"Ready\\" condition to be \\"True\\"."'), + readinessChecks: { + '#matchCondition':: d.obj(help="\"MatchCondition specifies the condition you'd like to match if you're using \\\"MatchCondition\\\" type.\""), + matchCondition: { + '#withType':: d.fn(help="\"Type indicates the type of condition you'd like to use.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { matchCondition+: { type: type } }, + }, + '#withFieldPath':: d.fn(help='"FieldPath shows the path of the field whose value will be used."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldPath: fieldPath }, + '#withMatchInteger':: d.fn(help="\"MatchInt is the value you'd like to match if you're using \\\"MatchInt\\\" type.\"", args=[d.arg(name='matchInteger', type=d.T.integer)]), + withMatchInteger(matchInteger): { matchInteger: matchInteger }, + '#withMatchString':: d.fn(help="\"MatchString is the value you'd like to match if you're using \\\"MatchString\\\" type.\"", args=[d.arg(name='matchString', type=d.T.string)]), + withMatchString(matchString): { matchString: matchString }, + '#withType':: d.fn(help="\"Type indicates the type of probe you'd like to use.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withBase':: d.fn(help='"Base is the target resource that the patches will be applied on."', args=[d.arg(name='base', type=d.T.object)]), + withBase(base): { base: base }, + '#withBaseMixin':: d.fn(help='"Base is the target resource that the patches will be applied on."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='base', type=d.T.object)]), + withBaseMixin(base): { base+: base }, + '#withConnectionDetails':: d.fn(help='"ConnectionDetails lists the propagation secret keys from this target\\nresource to the composition instance connection secret."', args=[d.arg(name='connectionDetails', type=d.T.array)]), + withConnectionDetails(connectionDetails): { connectionDetails: if std.isArray(v=connectionDetails) then connectionDetails else [connectionDetails] }, + '#withConnectionDetailsMixin':: d.fn(help='"ConnectionDetails lists the propagation secret keys from this target\\nresource to the composition instance connection secret."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='connectionDetails', type=d.T.array)]), + withConnectionDetailsMixin(connectionDetails): { connectionDetails+: if std.isArray(v=connectionDetails) then connectionDetails else [connectionDetails] }, + '#withName':: d.fn(help="\"A Name uniquely identifies this entry within its Composition's resources\\narray. Names are optional but *strongly* recommended. When all entries in\\nthe resources array are named entries may added, deleted, and reordered\\nas long as their names do not change. 
When entries are not named the\\nlength and order of the resources array should be treated as immutable.\\nEither all or no entries must be named.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPatches':: d.fn(help='"Patches will be applied as overlay to the base resource."', args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { patches: if std.isArray(v=patches) then patches else [patches] }, + '#withPatchesMixin':: d.fn(help='"Patches will be applied as overlay to the base resource."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { patches+: if std.isArray(v=patches) then patches else [patches] }, + '#withReadinessChecks':: d.fn(help='"ReadinessChecks allows users to define custom readiness checks. All checks\\nhave to return true in order for resource to be considered ready. The\\ndefault readiness check is to have the \\"Ready\\" condition to be \\"True\\"."', args=[d.arg(name='readinessChecks', type=d.T.array)]), + withReadinessChecks(readinessChecks): { readinessChecks: if std.isArray(v=readinessChecks) then readinessChecks else [readinessChecks] }, + '#withReadinessChecksMixin':: d.fn(help='"ReadinessChecks allows users to define custom readiness checks. All checks\\nhave to return true in order for resource to be considered ready. The\\ndefault readiness check is to have the \\"Ready\\" condition to be \\"True\\"."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessChecks', type=d.T.array)]), + withReadinessChecksMixin(readinessChecks): { readinessChecks+: if std.isArray(v=readinessChecks) then readinessChecks else [readinessChecks] }, + }, + '#withMode':: d.fn(help="\"Mode controls what type or \\\"mode\\\" of Composition will be used.\\n\\n\\n\\\"Pipeline\\\" indicates that a Composition specifies a pipeline of\\nComposition Functions, each of which is responsible for producing\\ncomposed resources that Crossplane should create or update.\\n\\n\\n\\\"Resources\\\" indicates that a Composition uses what is commonly referred\\nto as \\\"Patch \u0026 Transform\\\" or P\u0026T composition. This mode of Composition\\nuses an array of resources, each a template for a composed resource.\\n\\n\\nAll Compositions should use Pipeline mode. Resources mode is deprecated.\\nResources mode won't be removed in Crossplane 1.x, and will remain the\\ndefault to avoid breaking legacy Compositions. However, it's no longer\\naccepting new features, and only accepting security related bug fixes.\"", args=[d.arg(name='mode', type=d.T.string)]), + withMode(mode): { spec+: { mode: mode } }, + '#withPatchSets':: d.fn(help='"PatchSets define a named set of patches that may be included by any\\nresource in this Composition. PatchSets cannot themselves refer to other\\nPatchSets.\\n\\n\\nPatchSets are only used by the \\"Resources\\" mode of Composition. They\\nare ignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."', args=[d.arg(name='patchSets', type=d.T.array)]), + withPatchSets(patchSets): { spec+: { patchSets: if std.isArray(v=patchSets) then patchSets else [patchSets] } }, + '#withPatchSetsMixin':: d.fn(help='"PatchSets define a named set of patches that may be included by any\\nresource in this Composition. PatchSets cannot themselves refer to other\\nPatchSets.\\n\\n\\nPatchSets are only used by the \\"Resources\\" mode of Composition. 
They\\nare ignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patchSets', type=d.T.array)]), + withPatchSetsMixin(patchSets): { spec+: { patchSets+: if std.isArray(v=patchSets) then patchSets else [patchSets] } }, + '#withPipeline':: d.fn(help='"Pipeline is a list of composition function steps that will be used when a\\ncomposite resource referring to this composition is created. One of\\nresources and pipeline must be specified - you cannot specify both.\\n\\n\\nThe Pipeline is only used by the \\"Pipeline\\" mode of Composition. It is\\nignored by other modes."', args=[d.arg(name='pipeline', type=d.T.array)]), + withPipeline(pipeline): { spec+: { pipeline: if std.isArray(v=pipeline) then pipeline else [pipeline] } }, + '#withPipelineMixin':: d.fn(help='"Pipeline is a list of composition function steps that will be used when a\\ncomposite resource referring to this composition is created. One of\\nresources and pipeline must be specified - you cannot specify both.\\n\\n\\nThe Pipeline is only used by the \\"Pipeline\\" mode of Composition. It is\\nignored by other modes."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='pipeline', type=d.T.array)]), + withPipelineMixin(pipeline): { spec+: { pipeline+: if std.isArray(v=pipeline) then pipeline else [pipeline] } }, + '#withResources':: d.fn(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource referring to this composition is created.\\n\\n\\nResources are only used by the \\"Resources\\" mode of Composition. They are\\nignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."', args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { spec+: { resources: if std.isArray(v=resources) then resources else [resources] } }, + '#withResourcesMixin':: d.fn(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource referring to this composition is created.\\n\\n\\nResources are only used by the \\"Resources\\" mode of Composition. They are\\nignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { spec+: { resources+: if std.isArray(v=resources) then resources else [resources] } }, + '#withWriteConnectionSecretsToNamespace':: d.fn(help='"WriteConnectionSecretsToNamespace specifies the namespace in which the\\nconnection secrets of composite resource dynamically provisioned using\\nthis composition will be created.\\nThis field is planned to be replaced in a future release in favor of\\nPublishConnectionDetailsWithStoreConfigRef. 
Currently, both could be\\nset independently and connection details would be published to both\\nwithout affecting each other as long as related fields at MR level\\nspecified."', args=[d.arg(name='writeConnectionSecretsToNamespace', type=d.T.string)]), + withWriteConnectionSecretsToNamespace(writeConnectionSecretsToNamespace): { spec+: { writeConnectionSecretsToNamespace: writeConnectionSecretsToNamespace } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/apiextensions/v1/compositionRevision.libsonnet b/crossplane/1.17/_gen/apiextensions/v1/compositionRevision.libsonnet new file mode 100644 index 0000000..38b5673 --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/v1/compositionRevision.libsonnet @@ -0,0 +1,590 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='compositionRevision', url='', help="\"A CompositionRevision represents a revision of a Composition. Crossplane\\ncreates new revisions when there are changes to the Composition.\\n\\n\\nCrossplane creates and manages CompositionRevisions. Don't directly edit\\nCompositionRevisions.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of CompositionRevision', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'apiextensions.crossplane.io/v1', + kind: 'CompositionRevision', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"CompositionRevisionSpec specifies the desired state of the composition\\nrevision."'), + spec: { + '#compositeTypeRef':: d.obj(help='"CompositeTypeRef specifies the type of composite resource that this\\ncomposition is compatible with."'), + compositeTypeRef: { + '#withApiVersion':: d.fn(help='"APIVersion of the type."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { compositeTypeRef+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the type."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { compositeTypeRef+: { kind: kind } } }, + }, + '#environment':: d.obj(help='"Environment configures the environment in which resources are rendered.\\n\\n\\nTHIS IS AN ALPHA FIELD. Do not use it in production. 
It is not honored\\nunless the relevant Crossplane feature flag is enabled, and may be\\nchanged or removed without notice."'), + environment: { + '#environmentConfigs':: d.obj(help='"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\\nresources are stored in the composite resource at\\n`spec.environmentConfigRefs` and is only updated if it is null.\\n\\n\\nThe list of references is used to compute an in-memory environment at\\ncompose time. The data of all object is merged in the order they are\\nlisted, meaning the values of EnvironmentConfigs with a larger index take\\npriority over ones with smaller indices.\\n\\n\\nThe computed environment can be accessed in a composition using\\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches."'), + environmentConfigs: { + '#ref':: d.obj(help='"Ref is a named reference to a single EnvironmentConfig.\\nEither Ref or Selector is required."'), + ref: { + '#withName':: d.fn(help='"The name of the object."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ref+: { name: name } }, + }, + '#selector':: d.obj(help='"Selector selects EnvironmentConfig(s) via labels."'), + selector: { + '#matchLabels':: d.obj(help='"MatchLabels ensures an object with matching labels is selected."'), + matchLabels: { + '#withFromFieldPathPolicy':: d.fn(help='"FromFieldPathPolicy specifies the policy for the valueFromFieldPath.\\nThe default is Required, meaning that an error will be returned if the\\nfield is not found in the composite resource.\\nOptional means that if the field is not found in the composite resource,\\nthat label pair will just be skipped. N.B. other specified label\\nmatchers will still be used to retrieve the desired\\nenvironment config, if any."', args=[d.arg(name='fromFieldPathPolicy', type=d.T.string)]), + withFromFieldPathPolicy(fromFieldPathPolicy): { fromFieldPathPolicy: fromFieldPathPolicy }, + '#withKey':: d.fn(help='"Key of the label to match."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withType':: d.fn(help='"Type specifies where the value for a label comes from."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withValue':: d.fn(help='"Value specifies a literal label value."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + '#withValueFromFieldPath':: d.fn(help='"ValueFromFieldPath specifies the field path to look for the label value."', args=[d.arg(name='valueFromFieldPath', type=d.T.string)]), + withValueFromFieldPath(valueFromFieldPath): { valueFromFieldPath: valueFromFieldPath }, + }, + '#withMatchLabels':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."', args=[d.arg(name='matchLabels', type=d.T.array)]), + withMatchLabels(matchLabels): { selector+: { matchLabels: if std.isArray(v=matchLabels) then matchLabels else [matchLabels] } }, + '#withMatchLabelsMixin':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.array)]), + withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: if std.isArray(v=matchLabels) then matchLabels else [matchLabels] } }, + '#withMaxMatch':: d.fn(help='"MaxMatch specifies the number of extracted EnvironmentConfigs in Multiple mode, extracts all if nil."', args=[d.arg(name='maxMatch', type=d.T.integer)]), + withMaxMatch(maxMatch): { selector+: { maxMatch: maxMatch } }, + 
'#withMinMatch':: d.fn(help='"MinMatch specifies the required minimum of extracted EnvironmentConfigs in Multiple mode."', args=[d.arg(name='minMatch', type=d.T.integer)]), + withMinMatch(minMatch): { selector+: { minMatch: minMatch } }, + '#withMode':: d.fn(help='"Mode specifies retrieval strategy: \\"Single\\" or \\"Multiple\\"."', args=[d.arg(name='mode', type=d.T.string)]), + withMode(mode): { selector+: { mode: mode } }, + '#withSortByFieldPath':: d.fn(help='"SortByFieldPath is the path to the field based on which list of EnvironmentConfigs is alphabetically sorted."', args=[d.arg(name='sortByFieldPath', type=d.T.string)]), + withSortByFieldPath(sortByFieldPath): { selector+: { sortByFieldPath: sortByFieldPath } }, + }, + '#withType':: d.fn(help='"Type specifies the way the EnvironmentConfig is selected.\\nDefault is `Reference`"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#patches':: d.obj(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed.\""), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite or\\nCombineToComposite patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#mergeOptions':: d.obj(help='"MergeOptions Specifies merge options on a field path."'), + mergeOptions: { + '#withAppendSlice':: d.fn(help='"Specifies that already existing elements in a merged slice should be preserved"', args=[d.arg(name='appendSlice', type=d.T.boolean)]), + withAppendSlice(appendSlice): { policy+: { mergeOptions+: { appendSlice: appendSlice } } }, + '#withKeepMapValues':: d.fn(help='"Specifies that already existing values in a merged map should be preserved"', args=[d.arg(name='keepMapValues', 
type=d.T.boolean)]), + withKeepMapValues(keepMapValues): { policy+: { mergeOptions+: { keepMapValues: keepMapValues } } }, + }, + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. Use 'Required' if the patch should fail if\\nthe specified path does not exist.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. 
Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join defines parameters to join a slice of values to a string."'), + join: { + '#withSeparator':: d.fn(help='"Separator defines the character that should separate the values from each\\nother in the joined string."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. 
May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON.\\n`ToAdler32` generate a addler32 hash based on the input string."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath or\\nToCompositeFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. 
Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#policy':: d.obj(help='"Policy represents the Resolve and Resolution policies which apply to\\nall EnvironmentSourceReferences in EnvironmentConfigs list."'), + policy: { + '#withResolution':: d.fn(help="\"Resolution specifies whether resolution of this reference is required.\\nThe default is 'Required', which means the reconcile will fail if the\\nreference cannot be resolved. 'Optional' means this reference will be\\na no-op if it cannot be resolved.\"", args=[d.arg(name='resolution', type=d.T.string)]), + withResolution(resolution): { spec+: { environment+: { policy+: { resolution: resolution } } } }, + '#withResolve':: d.fn(help="\"Resolve specifies when this reference should be resolved. The default\\nis 'IfNotPresent', which will attempt to resolve the reference only when\\nthe corresponding field is not present. Use 'Always' to resolve the\\nreference on every reconcile.\"", args=[d.arg(name='resolve', type=d.T.string)]), + withResolve(resolve): { spec+: { environment+: { policy+: { resolve: resolve } } } }, + }, + '#withDefaultData':: d.fn(help='"DefaultData statically defines the initial state of the environment.\\nIt has the same schema-less structure as the data field in\\nenvironment configs.\\nIt is overwritten by the selected environment configs."', args=[d.arg(name='defaultData', type=d.T.object)]), + withDefaultData(defaultData): { spec+: { environment+: { defaultData: defaultData } } }, + '#withDefaultDataMixin':: d.fn(help='"DefaultData statically defines the initial state of the environment.\\nIt has the same schema-less structure as the data field in\\nenvironment configs.\\nIt is overwritten by the selected environment configs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='defaultData', type=d.T.object)]), + withDefaultDataMixin(defaultData): { spec+: { environment+: { defaultData+: defaultData } } }, + '#withEnvironmentConfigs':: d.fn(help='"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\\nresources are stored in the composite resource at\\n`spec.environmentConfigRefs` and is only updated if it is null.\\n\\n\\nThe list of references is used to compute an in-memory environment at\\ncompose time. The data of all object is merged in the order they are\\nlisted, meaning the values of EnvironmentConfigs with a larger index take\\npriority over ones with smaller indices.\\n\\n\\nThe computed environment can be accessed in a composition using\\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches."', args=[d.arg(name='environmentConfigs', type=d.T.array)]), + withEnvironmentConfigs(environmentConfigs): { spec+: { environment+: { environmentConfigs: if std.isArray(v=environmentConfigs) then environmentConfigs else [environmentConfigs] } } }, + '#withEnvironmentConfigsMixin':: d.fn(help='"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\\nresources are stored in the composite resource at\\n`spec.environmentConfigRefs` and is only updated if it is null.\\n\\n\\nThe list of references is used to compute an in-memory environment at\\ncompose time. 
The data of all object is merged in the order they are\\nlisted, meaning the values of EnvironmentConfigs with a larger index take\\npriority over ones with smaller indices.\\n\\n\\nThe computed environment can be accessed in a composition using\\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='environmentConfigs', type=d.T.array)]), + withEnvironmentConfigsMixin(environmentConfigs): { spec+: { environment+: { environmentConfigs+: if std.isArray(v=environmentConfigs) then environmentConfigs else [environmentConfigs] } } }, + '#withPatches':: d.fn(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed.\"", args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { spec+: { environment+: { patches: if std.isArray(v=patches) then patches else [patches] } } }, + '#withPatchesMixin':: d.fn(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { spec+: { environment+: { patches+: if std.isArray(v=patches) then patches else [patches] } } }, + }, + '#patchSets':: d.obj(help='"PatchSets define a named set of patches that may be included by any\\nresource in this Composition. PatchSets cannot themselves refer to other\\nPatchSets.\\n\\n\\nPatchSets are only used by the \\"Resources\\" mode of Composition. They\\nare ignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."'), + patchSets: { + '#patches':: d.obj(help='"Patches will be applied as an overlay to the base resource."'), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite,\\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#mergeOptions':: d.obj(help='"MergeOptions Specifies merge options on a field path."'), + mergeOptions: { + '#withAppendSlice':: d.fn(help='"Specifies that already existing elements in a merged slice should be preserved"', args=[d.arg(name='appendSlice', type=d.T.boolean)]), + withAppendSlice(appendSlice): { policy+: { mergeOptions+: { appendSlice: appendSlice } } }, + '#withKeepMapValues':: d.fn(help='"Specifies that already existing values in a merged map should be preserved"', args=[d.arg(name='keepMapValues', type=d.T.boolean)]), + withKeepMapValues(keepMapValues): { policy+: { mergeOptions+: { keepMapValues: keepMapValues } } }, + }, + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\\nthe specified path does not exist.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. 
The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join defines parameters to join a slice of values to a string."'), + join: { + '#withSeparator':: d.fn(help='"Separator defines the character that should separate the values from each\\nother in the joined string."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON.\\n`ToAdler32` generate a addler32 hash based on the input string."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath,\\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withPatchSetName':: d.fn(help='"PatchSetName to include patches from. Required when type is PatchSet."', args=[d.arg(name='patchSetName', type=d.T.string)]), + withPatchSetName(patchSetName): { patchSetName: patchSetName }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. 
Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withName':: d.fn(help='"Name of this PatchSet."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPatches':: d.fn(help='"Patches will be applied as an overlay to the base resource."', args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { patches: if std.isArray(v=patches) then patches else [patches] }, + '#withPatchesMixin':: d.fn(help='"Patches will be applied as an overlay to the base resource."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { patches+: if std.isArray(v=patches) then patches else [patches] }, + }, + '#pipeline':: d.obj(help='"Pipeline is a list of composition function steps that will be used when a\\ncomposite resource referring to this composition is created. One of\\nresources and pipeline must be specified - you cannot specify both.\\n\\n\\nThe Pipeline is only used by the \\"Pipeline\\" mode of Composition. It is\\nignored by other modes."'), + pipeline: { + '#credentials':: d.obj(help='"Credentials are optional credentials that the Composition Function needs."'), + credentials: { + '#secretRef':: d.obj(help='"A SecretRef is a reference to a secret containing credentials that should\\nbe supplied to the function."'), + secretRef: { + '#withName':: d.fn(help='"Name of the secret."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace of the secret."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withName':: d.fn(help='"Name of this set of credentials."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withSource':: d.fn(help='"Source of the function credentials."', args=[d.arg(name='source', type=d.T.string)]), + withSource(source): { source: source }, + }, + '#functionRef':: d.obj(help='"FunctionRef is a reference to the Composition Function this step should\\nexecute."'), + functionRef: { + '#withName':: d.fn(help='"Name of the referenced Function."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { functionRef+: { name: name } }, + }, + '#withCredentials':: d.fn(help='"Credentials are optional credentials that the Composition Function needs."', args=[d.arg(name='credentials', type=d.T.array)]), + withCredentials(credentials): { credentials: if std.isArray(v=credentials) then credentials else [credentials] }, + '#withCredentialsMixin':: d.fn(help='"Credentials are optional credentials that the Composition Function needs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='credentials', type=d.T.array)]), + withCredentialsMixin(credentials): { credentials+: if std.isArray(v=credentials) then credentials else [credentials] }, + '#withInput':: d.fn(help="\"Input is an optional, arbitrary Kubernetes resource (i.e. a resource\\nwith an apiVersion and kind) that will be passed to the Composition\\nFunction as the 'input' of its RunFunctionRequest.\"", args=[d.arg(name='input', type=d.T.object)]), + withInput(input): { input: input }, + '#withInputMixin':: d.fn(help="\"Input is an optional, arbitrary Kubernetes resource (i.e. 
a resource\\nwith an apiVersion and kind) that will be passed to the Composition\\nFunction as the 'input' of its RunFunctionRequest.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='input', type=d.T.object)]), + withInputMixin(input): { input+: input }, + '#withStep':: d.fn(help='"Step name. Must be unique within its Pipeline."', args=[d.arg(name='step', type=d.T.string)]), + withStep(step): { step: step }, + }, + '#publishConnectionDetailsWithStoreConfigRef':: d.obj(help='"PublishConnectionDetailsWithStoreConfig specifies the secret store config\\nwith which the connection details of composite resources dynamically\\nprovisioned using this composition will be published.\\n\\n\\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\\nunless the relevant Crossplane feature flag is enabled, and may be\\nchanged or removed without notice."'), + publishConnectionDetailsWithStoreConfigRef: { + '#withName':: d.fn(help='"Name of the referenced StoreConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { publishConnectionDetailsWithStoreConfigRef+: { name: name } } }, + }, + '#resources':: d.obj(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource referring to this composition is created.\\n\\n\\nResources are only used by the \\"Resources\\" mode of Composition. They are\\nignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."'), + resources: { + '#connectionDetails':: d.obj(help='"ConnectionDetails lists the propagation secret keys from this target\\nresource to the composition instance connection secret."'), + connectionDetails: { + '#withFromConnectionSecretKey':: d.fn(help="\"FromConnectionSecretKey is the key that will be used to fetch the value\\nfrom the composed resource's connection secret.\"", args=[d.arg(name='fromConnectionSecretKey', type=d.T.string)]), + withFromConnectionSecretKey(fromConnectionSecretKey): { fromConnectionSecretKey: fromConnectionSecretKey }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the composed resource whose\\nvalue to be used as input. Name must be specified if the type is\\nFromFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withName':: d.fn(help="\"Name of the connection secret key that will be propagated to the\\nconnection secret of the composition instance. Leave empty if you'd like\\nto use the same key name.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withType':: d.fn(help='"Type sets the connection detail fetching behaviour to be used. Each\\nconnection detail type may require its own fields to be set on the\\nConnectionDetail object. If the type is omitted Crossplane will attempt\\nto infer it based on which other fields were specified. If multiple\\nfields are specified the order of precedence is:\\n1. FromValue\\n2. FromConnectionSecretKey\\n3. FromFieldPath"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withValue':: d.fn(help='"Value that will be propagated to the connection secret of the composite\\nresource. 
May be set to inject a fixed, non-sensitive connection secret\\nvalue, for example a well-known port."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#patches':: d.obj(help='"Patches will be applied as overlay to the base resource."'), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite,\\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#mergeOptions':: d.obj(help='"MergeOptions Specifies merge options on a field path."'), + mergeOptions: { + '#withAppendSlice':: d.fn(help='"Specifies that already existing elements in a merged slice should be preserved"', args=[d.arg(name='appendSlice', type=d.T.boolean)]), + withAppendSlice(appendSlice): { policy+: { mergeOptions+: { appendSlice: appendSlice } } }, + '#withKeepMapValues':: d.fn(help='"Specifies that already existing values in a merged map should be preserved"', args=[d.arg(name='keepMapValues', type=d.T.boolean)]), + withKeepMapValues(keepMapValues): { policy+: { mergeOptions+: { keepMapValues: keepMapValues } } }, + }, + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\\nthe specified path does not exist.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. 
The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join defines parameters to join a slice of values to a string."'), + join: { + '#withSeparator':: d.fn(help='"Separator defines the character that should separate the values from each\\nother in the joined string."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON.\\n`ToAdler32` generate a addler32 hash based on the input string."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath,\\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withPatchSetName':: d.fn(help='"PatchSetName to include patches from. Required when type is PatchSet."', args=[d.arg(name='patchSetName', type=d.T.string)]), + withPatchSetName(patchSetName): { patchSetName: patchSetName }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#readinessChecks':: d.obj(help='"ReadinessChecks allows users to define custom readiness checks. All checks\\nhave to return true in order for resource to be considered ready. 
The\\ndefault readiness check is to have the \\"Ready\\" condition to be \\"True\\"."'), + readinessChecks: { + '#matchCondition':: d.obj(help="\"MatchCondition specifies the condition you'd like to match if you're using \\\"MatchCondition\\\" type.\""), + matchCondition: { + '#withType':: d.fn(help="\"Type indicates the type of condition you'd like to use.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { matchCondition+: { type: type } }, + }, + '#withFieldPath':: d.fn(help='"FieldPath shows the path of the field whose value will be used."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldPath: fieldPath }, + '#withMatchInteger':: d.fn(help="\"MatchInt is the value you'd like to match if you're using \\\"MatchInt\\\" type.\"", args=[d.arg(name='matchInteger', type=d.T.integer)]), + withMatchInteger(matchInteger): { matchInteger: matchInteger }, + '#withMatchString':: d.fn(help="\"MatchString is the value you'd like to match if you're using \\\"MatchString\\\" type.\"", args=[d.arg(name='matchString', type=d.T.string)]), + withMatchString(matchString): { matchString: matchString }, + '#withType':: d.fn(help="\"Type indicates the type of probe you'd like to use.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withBase':: d.fn(help='"Base is the target resource that the patches will be applied on."', args=[d.arg(name='base', type=d.T.object)]), + withBase(base): { base: base }, + '#withBaseMixin':: d.fn(help='"Base is the target resource that the patches will be applied on."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='base', type=d.T.object)]), + withBaseMixin(base): { base+: base }, + '#withConnectionDetails':: d.fn(help='"ConnectionDetails lists the propagation secret keys from this target\\nresource to the composition instance connection secret."', args=[d.arg(name='connectionDetails', type=d.T.array)]), + withConnectionDetails(connectionDetails): { connectionDetails: if std.isArray(v=connectionDetails) then connectionDetails else [connectionDetails] }, + '#withConnectionDetailsMixin':: d.fn(help='"ConnectionDetails lists the propagation secret keys from this target\\nresource to the composition instance connection secret."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='connectionDetails', type=d.T.array)]), + withConnectionDetailsMixin(connectionDetails): { connectionDetails+: if std.isArray(v=connectionDetails) then connectionDetails else [connectionDetails] }, + '#withName':: d.fn(help="\"A Name uniquely identifies this entry within its Composition's resources\\narray. Names are optional but *strongly* recommended. When all entries in\\nthe resources array are named entries may added, deleted, and reordered\\nas long as their names do not change. 
When entries are not named the\\nlength and order of the resources array should be treated as immutable.\\nEither all or no entries must be named.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPatches':: d.fn(help='"Patches will be applied as overlay to the base resource."', args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { patches: if std.isArray(v=patches) then patches else [patches] }, + '#withPatchesMixin':: d.fn(help='"Patches will be applied as overlay to the base resource."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { patches+: if std.isArray(v=patches) then patches else [patches] }, + '#withReadinessChecks':: d.fn(help='"ReadinessChecks allows users to define custom readiness checks. All checks\\nhave to return true in order for resource to be considered ready. The\\ndefault readiness check is to have the \\"Ready\\" condition to be \\"True\\"."', args=[d.arg(name='readinessChecks', type=d.T.array)]), + withReadinessChecks(readinessChecks): { readinessChecks: if std.isArray(v=readinessChecks) then readinessChecks else [readinessChecks] }, + '#withReadinessChecksMixin':: d.fn(help='"ReadinessChecks allows users to define custom readiness checks. All checks\\nhave to return true in order for resource to be considered ready. The\\ndefault readiness check is to have the \\"Ready\\" condition to be \\"True\\"."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessChecks', type=d.T.array)]), + withReadinessChecksMixin(readinessChecks): { readinessChecks+: if std.isArray(v=readinessChecks) then readinessChecks else [readinessChecks] }, + }, + '#withMode':: d.fn(help="\"Mode controls what type or \\\"mode\\\" of Composition will be used.\\n\\n\\n\\\"Pipeline\\\" indicates that a Composition specifies a pipeline of\\nComposition Functions, each of which is responsible for producing\\ncomposed resources that Crossplane should create or update.\\n\\n\\n\\\"Resources\\\" indicates that a Composition uses what is commonly referred\\nto as \\\"Patch \u0026 Transform\\\" or P\u0026T composition. This mode of Composition\\nuses an array of resources, each a template for a composed resource.\\n\\n\\nAll Compositions should use Pipeline mode. Resources mode is deprecated.\\nResources mode won't be removed in Crossplane 1.x, and will remain the\\ndefault to avoid breaking legacy Compositions. However, it's no longer\\naccepting new features, and only accepting security related bug fixes.\"", args=[d.arg(name='mode', type=d.T.string)]), + withMode(mode): { spec+: { mode: mode } }, + '#withPatchSets':: d.fn(help='"PatchSets define a named set of patches that may be included by any\\nresource in this Composition. PatchSets cannot themselves refer to other\\nPatchSets.\\n\\n\\nPatchSets are only used by the \\"Resources\\" mode of Composition. They\\nare ignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."', args=[d.arg(name='patchSets', type=d.T.array)]), + withPatchSets(patchSets): { spec+: { patchSets: if std.isArray(v=patchSets) then patchSets else [patchSets] } }, + '#withPatchSetsMixin':: d.fn(help='"PatchSets define a named set of patches that may be included by any\\nresource in this Composition. PatchSets cannot themselves refer to other\\nPatchSets.\\n\\n\\nPatchSets are only used by the \\"Resources\\" mode of Composition. 
They\\nare ignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patchSets', type=d.T.array)]), + withPatchSetsMixin(patchSets): { spec+: { patchSets+: if std.isArray(v=patchSets) then patchSets else [patchSets] } }, + '#withPipeline':: d.fn(help='"Pipeline is a list of composition function steps that will be used when a\\ncomposite resource referring to this composition is created. One of\\nresources and pipeline must be specified - you cannot specify both.\\n\\n\\nThe Pipeline is only used by the \\"Pipeline\\" mode of Composition. It is\\nignored by other modes."', args=[d.arg(name='pipeline', type=d.T.array)]), + withPipeline(pipeline): { spec+: { pipeline: if std.isArray(v=pipeline) then pipeline else [pipeline] } }, + '#withPipelineMixin':: d.fn(help='"Pipeline is a list of composition function steps that will be used when a\\ncomposite resource referring to this composition is created. One of\\nresources and pipeline must be specified - you cannot specify both.\\n\\n\\nThe Pipeline is only used by the \\"Pipeline\\" mode of Composition. It is\\nignored by other modes."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='pipeline', type=d.T.array)]), + withPipelineMixin(pipeline): { spec+: { pipeline+: if std.isArray(v=pipeline) then pipeline else [pipeline] } }, + '#withResources':: d.fn(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource referring to this composition is created.\\n\\n\\nResources are only used by the \\"Resources\\" mode of Composition. They are\\nignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."', args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { spec+: { resources: if std.isArray(v=resources) then resources else [resources] } }, + '#withResourcesMixin':: d.fn(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource referring to this composition is created.\\n\\n\\nResources are only used by the \\"Resources\\" mode of Composition. They are\\nignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { spec+: { resources+: if std.isArray(v=resources) then resources else [resources] } }, + '#withRevision':: d.fn(help='"Revision number. Newer revisions have larger numbers."', args=[d.arg(name='revision', type=d.T.integer)]), + withRevision(revision): { spec+: { revision: revision } }, + '#withWriteConnectionSecretsToNamespace':: d.fn(help='"WriteConnectionSecretsToNamespace specifies the namespace in which the\\nconnection secrets of composite resource dynamically provisioned using\\nthis composition will be created.\\nThis field is planned to be replaced in a future release in favor of\\nPublishConnectionDetailsWithStoreConfigRef. 
Currently, both could be\\nset independently and connection details would be published to both\\nwithout affecting each other as long as related fields at MR level\\nspecified."', args=[d.arg(name='writeConnectionSecretsToNamespace', type=d.T.string)]), + withWriteConnectionSecretsToNamespace(writeConnectionSecretsToNamespace): { spec+: { writeConnectionSecretsToNamespace: writeConnectionSecretsToNamespace } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/apiextensions/v1/main.libsonnet b/crossplane/1.17/_gen/apiextensions/v1/main.libsonnet new file mode 100644 index 0000000..3eeb4d3 --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/v1/main.libsonnet @@ -0,0 +1,7 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1', url='', help=''), + compositeResourceDefinition: (import 'compositeResourceDefinition.libsonnet'), + composition: (import 'composition.libsonnet'), + compositionRevision: (import 'compositionRevision.libsonnet'), +} diff --git a/crossplane/1.17/_gen/apiextensions/v1alpha1/environmentConfig.libsonnet b/crossplane/1.17/_gen/apiextensions/v1alpha1/environmentConfig.libsonnet new file mode 100644 index 0000000..f09fc56 --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/v1alpha1/environmentConfig.libsonnet @@ -0,0 +1,58 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='environmentConfig', url='', help='"An EnvironmentConfig contains user-defined unstructured values for\\nuse in a Composition.\\n\\n\\nRead the Crossplane documentation for\\n[more information about EnvironmentConfigs](https://docs.crossplane.io/latest/concepts/environment-configs)."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of EnvironmentConfig', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'apiextensions.crossplane.io/v1alpha1', + kind: 'EnvironmentConfig', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#withData':: d.fn(help='"The data of this EnvironmentConfig.\\nThis may contain any kind of structure that can be serialized into JSON."', args=[d.arg(name='data', type=d.T.object)]), + withData(data): { data: data }, + '#withDataMixin':: d.fn(help='"The data of this EnvironmentConfig.\\nThis may contain any kind of structure that can be serialized into JSON."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='data', type=d.T.object)]), + withDataMixin(data): { data+: data }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/apiextensions/v1alpha1/main.libsonnet b/crossplane/1.17/_gen/apiextensions/v1alpha1/main.libsonnet new file mode 100644 index 0000000..f7a058f --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/v1alpha1/main.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + environmentConfig: (import 'environmentConfig.libsonnet'), + usage: (import 'usage.libsonnet'), +} diff --git a/crossplane/1.17/_gen/apiextensions/v1alpha1/usage.libsonnet b/crossplane/1.17/_gen/apiextensions/v1alpha1/usage.libsonnet new file mode 100644 index 0000000..2db3b23 --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/v1alpha1/usage.libsonnet @@ -0,0 +1,103 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='usage', url='', help='"A Usage defines a deletion blocking relationship between two resources.\\n\\n\\nUsages prevent accidental deletion of a single resource or deletion of\\nresources with dependent resources.\\n\\n\\nRead the Crossplane documentation for\\n[more information about Compositions](https://docs.crossplane.io/latest/concepts/usages)."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Usage', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'apiextensions.crossplane.io/v1alpha1', + kind: 'Usage', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"UsageSpec defines the desired state of Usage."'), + spec: { + '#by':: d.obj(help='"By is the resource that is \\"using the other resource\\"."'), + by: { + '#resourceRef':: d.obj(help='"Reference to the resource."'), + resourceRef: { + '#withName':: d.fn(help='"Name of the referent."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { by+: { resourceRef+: { name: name } } } }, + }, + '#resourceSelector':: d.obj(help='"Selector to the resource.\\nThis field will be ignored if ResourceRef is set."'), + resourceSelector: { + '#withMatchControllerRef':: d.fn(help='"MatchControllerRef ensures an object with the same controller reference\\nas the selecting object is selected."', args=[d.arg(name='matchControllerRef', type=d.T.boolean)]), + withMatchControllerRef(matchControllerRef): { spec+: { by+: { resourceSelector+: { matchControllerRef: matchControllerRef } } } }, + '#withMatchLabels':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { by+: { resourceSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { by+: { resourceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { by+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the referent.\\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { by+: { kind: kind } } }, + }, + '#of':: d.obj(help='"Of is the resource that is \\"being used\\"."'), + of: { + '#resourceRef':: d.obj(help='"Reference to the resource."'), + resourceRef: { + '#withName':: d.fn(help='"Name of the referent."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { of+: { resourceRef+: { name: name } } } }, + }, + '#resourceSelector':: d.obj(help='"Selector to the resource.\\nThis field will be ignored if ResourceRef is set."'), + resourceSelector: { + '#withMatchControllerRef':: d.fn(help='"MatchControllerRef ensures an object with the same controller reference\\nas the selecting object is selected."', args=[d.arg(name='matchControllerRef', type=d.T.boolean)]), + withMatchControllerRef(matchControllerRef): { spec+: { of+: { resourceSelector+: { matchControllerRef: matchControllerRef } } } }, + '#withMatchLabels':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { of+: { resourceSelector+: { matchLabels: matchLabels } } } }, + 
'#withMatchLabelsMixin':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { of+: { resourceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { of+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the referent.\\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { of+: { kind: kind } } }, + }, + '#withReason':: d.fn(help='"Reason is the reason for blocking deletion of the resource."', args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { spec+: { reason: reason } }, + '#withReplayDeletion':: d.fn(help='"ReplayDeletion will trigger a deletion on the used resource during the deletion of the usage itself, if it was attempted to be deleted at least once."', args=[d.arg(name='replayDeletion', type=d.T.boolean)]), + withReplayDeletion(replayDeletion): { spec+: { replayDeletion: replayDeletion } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/apiextensions/v1beta1/compositionRevision.libsonnet b/crossplane/1.17/_gen/apiextensions/v1beta1/compositionRevision.libsonnet new file mode 100644 index 0000000..f8a426f --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/v1beta1/compositionRevision.libsonnet @@ -0,0 +1,590 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='compositionRevision', url='', help="\"A CompositionRevision represents a revision of a Composition. Crossplane\\ncreates new revisions when there are changes to the Composition.\\n\\n\\nCrossplane creates and manages CompositionRevisions. Don't directly edit\\nCompositionRevisions.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of CompositionRevision', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'apiextensions.crossplane.io/v1beta1', + kind: 'CompositionRevision', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"CompositionRevisionSpec specifies the desired state of the composition\\nrevision."'), + spec: { + '#compositeTypeRef':: d.obj(help='"CompositeTypeRef specifies the type of composite resource that this\\ncomposition is compatible with."'), + compositeTypeRef: { + '#withApiVersion':: d.fn(help='"APIVersion of the type."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { compositeTypeRef+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the type."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { compositeTypeRef+: { kind: kind } } }, + }, + '#environment':: d.obj(help='"Environment configures the environment in which resources are rendered.\\n\\n\\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\\nunless the relevant Crossplane feature flag is enabled, and may be\\nchanged or removed without notice."'), + environment: { + '#environmentConfigs':: d.obj(help='"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\\nresources are stored in the composite resource at\\n`spec.environmentConfigRefs` and is only updated if it is null.\\n\\n\\nThe list of references is used to compute an in-memory environment at\\ncompose time. The data of all object is merged in the order they are\\nlisted, meaning the values of EnvironmentConfigs with a larger index take\\npriority over ones with smaller indices.\\n\\n\\nThe computed environment can be accessed in a composition using\\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches."'), + environmentConfigs: { + '#ref':: d.obj(help='"Ref is a named reference to a single EnvironmentConfig.\\nEither Ref or Selector is required."'), + ref: { + '#withName':: d.fn(help='"The name of the object."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ref+: { name: name } }, + }, + '#selector':: d.obj(help='"Selector selects EnvironmentConfig(s) via labels."'), + selector: { + '#matchLabels':: d.obj(help='"MatchLabels ensures an object with matching labels is selected."'), + matchLabels: { + '#withFromFieldPathPolicy':: d.fn(help='"FromFieldPathPolicy specifies the policy for the valueFromFieldPath.\\nThe default is Required, meaning that an error will be returned if the\\nfield is not found in the composite resource.\\nOptional means that if the field is not found in the composite resource,\\nthat label pair will just be skipped. N.B. 
other specified label\\nmatchers will still be used to retrieve the desired\\nenvironment config, if any."', args=[d.arg(name='fromFieldPathPolicy', type=d.T.string)]), + withFromFieldPathPolicy(fromFieldPathPolicy): { fromFieldPathPolicy: fromFieldPathPolicy }, + '#withKey':: d.fn(help='"Key of the label to match."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withType':: d.fn(help='"Type specifies where the value for a label comes from."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withValue':: d.fn(help='"Value specifies a literal label value."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + '#withValueFromFieldPath':: d.fn(help='"ValueFromFieldPath specifies the field path to look for the label value."', args=[d.arg(name='valueFromFieldPath', type=d.T.string)]), + withValueFromFieldPath(valueFromFieldPath): { valueFromFieldPath: valueFromFieldPath }, + }, + '#withMatchLabels':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."', args=[d.arg(name='matchLabels', type=d.T.array)]), + withMatchLabels(matchLabels): { selector+: { matchLabels: if std.isArray(v=matchLabels) then matchLabels else [matchLabels] } }, + '#withMatchLabelsMixin':: d.fn(help='"MatchLabels ensures an object with matching labels is selected."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.array)]), + withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: if std.isArray(v=matchLabels) then matchLabels else [matchLabels] } }, + '#withMaxMatch':: d.fn(help='"MaxMatch specifies the number of extracted EnvironmentConfigs in Multiple mode, extracts all if nil."', args=[d.arg(name='maxMatch', type=d.T.integer)]), + withMaxMatch(maxMatch): { selector+: { maxMatch: maxMatch } }, + '#withMinMatch':: d.fn(help='"MinMatch specifies the required minimum of extracted EnvironmentConfigs in Multiple mode."', args=[d.arg(name='minMatch', type=d.T.integer)]), + withMinMatch(minMatch): { selector+: { minMatch: minMatch } }, + '#withMode':: d.fn(help='"Mode specifies retrieval strategy: \\"Single\\" or \\"Multiple\\"."', args=[d.arg(name='mode', type=d.T.string)]), + withMode(mode): { selector+: { mode: mode } }, + '#withSortByFieldPath':: d.fn(help='"SortByFieldPath is the path to the field based on which list of EnvironmentConfigs is alphabetically sorted."', args=[d.arg(name='sortByFieldPath', type=d.T.string)]), + withSortByFieldPath(sortByFieldPath): { selector+: { sortByFieldPath: sortByFieldPath } }, + }, + '#withType':: d.fn(help='"Type specifies the way the EnvironmentConfig is selected.\\nDefault is `Reference`"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#patches':: d.obj(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed.\""), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite or\\nCombineToComposite patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#mergeOptions':: d.obj(help='"MergeOptions Specifies merge options on a field path."'), + mergeOptions: { + '#withAppendSlice':: d.fn(help='"Specifies that already existing elements in a merged slice should be preserved"', args=[d.arg(name='appendSlice', type=d.T.boolean)]), + withAppendSlice(appendSlice): { policy+: { mergeOptions+: { appendSlice: appendSlice } } }, + '#withKeepMapValues':: d.fn(help='"Specifies that already existing values in a merged map should be preserved"', args=[d.arg(name='keepMapValues', type=d.T.boolean)]), + withKeepMapValues(keepMapValues): { policy+: { mergeOptions+: { keepMapValues: keepMapValues } } }, + }, + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\\nthe specified path does not exist.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern is treated as a regular expression against\\nwhich the input string is tested. Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if no pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. 
The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join defines parameters to join a slice of values to a string."'), + join: { + '#withSeparator':: d.fn(help='"Separator defines the character that should separate the values from each\\nother in the joined string."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON.\\n`ToAdler32` generates an Adler-32 hash based on the input string."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath or\\nToCompositeFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#policy':: d.obj(help='"Policy represents the Resolve and Resolution policies which apply to\\nall EnvironmentSourceReferences in EnvironmentConfigs list."'), + policy: { + '#withResolution':: d.fn(help="\"Resolution specifies whether resolution of this reference is required.\\nThe default is 'Required', which means the reconcile will fail if the\\nreference cannot be resolved. 'Optional' means this reference will be\\na no-op if it cannot be resolved.\"", args=[d.arg(name='resolution', type=d.T.string)]), + withResolution(resolution): { spec+: { environment+: { policy+: { resolution: resolution } } } }, + '#withResolve':: d.fn(help="\"Resolve specifies when this reference should be resolved. The default\\nis 'IfNotPresent', which will attempt to resolve the reference only when\\nthe corresponding field is not present. 
Use 'Always' to resolve the\\nreference on every reconcile.\"", args=[d.arg(name='resolve', type=d.T.string)]), + withResolve(resolve): { spec+: { environment+: { policy+: { resolve: resolve } } } }, + }, + '#withDefaultData':: d.fn(help='"DefaultData statically defines the initial state of the environment.\\nIt has the same schema-less structure as the data field in\\nenvironment configs.\\nIt is overwritten by the selected environment configs."', args=[d.arg(name='defaultData', type=d.T.object)]), + withDefaultData(defaultData): { spec+: { environment+: { defaultData: defaultData } } }, + '#withDefaultDataMixin':: d.fn(help='"DefaultData statically defines the initial state of the environment.\\nIt has the same schema-less structure as the data field in\\nenvironment configs.\\nIt is overwritten by the selected environment configs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='defaultData', type=d.T.object)]), + withDefaultDataMixin(defaultData): { spec+: { environment+: { defaultData+: defaultData } } }, + '#withEnvironmentConfigs':: d.fn(help='"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\\nresources are stored in the composite resource at\\n`spec.environmentConfigRefs` and is only updated if it is null.\\n\\n\\nThe list of references is used to compute an in-memory environment at\\ncompose time. The data of all object is merged in the order they are\\nlisted, meaning the values of EnvironmentConfigs with a larger index take\\npriority over ones with smaller indices.\\n\\n\\nThe computed environment can be accessed in a composition using\\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches."', args=[d.arg(name='environmentConfigs', type=d.T.array)]), + withEnvironmentConfigs(environmentConfigs): { spec+: { environment+: { environmentConfigs: if std.isArray(v=environmentConfigs) then environmentConfigs else [environmentConfigs] } } }, + '#withEnvironmentConfigsMixin':: d.fn(help='"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\\nresources are stored in the composite resource at\\n`spec.environmentConfigRefs` and is only updated if it is null.\\n\\n\\nThe list of references is used to compute an in-memory environment at\\ncompose time. 
The data of all object is merged in the order they are\\nlisted, meaning the values of EnvironmentConfigs with a larger index take\\npriority over ones with smaller indices.\\n\\n\\nThe computed environment can be accessed in a composition using\\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='environmentConfigs', type=d.T.array)]), + withEnvironmentConfigsMixin(environmentConfigs): { spec+: { environment+: { environmentConfigs+: if std.isArray(v=environmentConfigs) then environmentConfigs else [environmentConfigs] } } }, + '#withPatches':: d.fn(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed.\"", args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { spec+: { environment+: { patches: if std.isArray(v=patches) then patches else [patches] } } }, + '#withPatchesMixin':: d.fn(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { spec+: { environment+: { patches+: if std.isArray(v=patches) then patches else [patches] } } }, + }, + '#patchSets':: d.obj(help='"PatchSets define a named set of patches that may be included by any\\nresource in this Composition. PatchSets cannot themselves refer to other\\nPatchSets.\\n\\n\\nPatchSets are only used by the \\"Resources\\" mode of Composition. They\\nare ignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."'), + patchSets: { + '#patches':: d.obj(help='"Patches will be applied as an overlay to the base resource."'), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite,\\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#mergeOptions':: d.obj(help='"MergeOptions Specifies merge options on a field path."'), + mergeOptions: { + '#withAppendSlice':: d.fn(help='"Specifies that already existing elements in a merged slice should be preserved"', args=[d.arg(name='appendSlice', type=d.T.boolean)]), + withAppendSlice(appendSlice): { policy+: { mergeOptions+: { appendSlice: appendSlice } } }, + '#withKeepMapValues':: d.fn(help='"Specifies that already existing values in a merged map should be preserved"', args=[d.arg(name='keepMapValues', type=d.T.boolean)]), + withKeepMapValues(keepMapValues): { policy+: { mergeOptions+: { keepMapValues: keepMapValues } } }, + }, + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\\nthe specified path does not exist.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern is treated as a regular expression against\\nwhich the input string is tested. Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if no pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. 
The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join defines parameters to join a slice of values to a string."'), + join: { + '#withSeparator':: d.fn(help='"Separator defines the character that should separate the values from each\\nother in the joined string."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON.\\n`ToAdler32` generate a addler32 hash based on the input string."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath,\\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withPatchSetName':: d.fn(help='"PatchSetName to include patches from. Required when type is PatchSet."', args=[d.arg(name='patchSetName', type=d.T.string)]), + withPatchSetName(patchSetName): { patchSetName: patchSetName }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. 
Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withName':: d.fn(help='"Name of this PatchSet."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPatches':: d.fn(help='"Patches will be applied as an overlay to the base resource."', args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { patches: if std.isArray(v=patches) then patches else [patches] }, + '#withPatchesMixin':: d.fn(help='"Patches will be applied as an overlay to the base resource."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { patches+: if std.isArray(v=patches) then patches else [patches] }, + }, + '#pipeline':: d.obj(help='"Pipeline is a list of composition function steps that will be used when a\\ncomposite resource referring to this composition is created. One of\\nresources and pipeline must be specified - you cannot specify both.\\n\\n\\nThe Pipeline is only used by the \\"Pipeline\\" mode of Composition. It is\\nignored by other modes."'), + pipeline: { + '#credentials':: d.obj(help='"Credentials are optional credentials that the Composition Function needs."'), + credentials: { + '#secretRef':: d.obj(help='"A SecretRef is a reference to a secret containing credentials that should\\nbe supplied to the function."'), + secretRef: { + '#withName':: d.fn(help='"Name of the secret."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace of the secret."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withName':: d.fn(help='"Name of this set of credentials."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withSource':: d.fn(help='"Source of the function credentials."', args=[d.arg(name='source', type=d.T.string)]), + withSource(source): { source: source }, + }, + '#functionRef':: d.obj(help='"FunctionRef is a reference to the Composition Function this step should\\nexecute."'), + functionRef: { + '#withName':: d.fn(help='"Name of the referenced Function."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { functionRef+: { name: name } }, + }, + '#withCredentials':: d.fn(help='"Credentials are optional credentials that the Composition Function needs."', args=[d.arg(name='credentials', type=d.T.array)]), + withCredentials(credentials): { credentials: if std.isArray(v=credentials) then credentials else [credentials] }, + '#withCredentialsMixin':: d.fn(help='"Credentials are optional credentials that the Composition Function needs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='credentials', type=d.T.array)]), + withCredentialsMixin(credentials): { credentials+: if std.isArray(v=credentials) then credentials else [credentials] }, + '#withInput':: d.fn(help="\"Input is an optional, arbitrary Kubernetes resource (i.e. a resource\\nwith an apiVersion and kind) that will be passed to the Composition\\nFunction as the 'input' of its RunFunctionRequest.\"", args=[d.arg(name='input', type=d.T.object)]), + withInput(input): { input: input }, + '#withInputMixin':: d.fn(help="\"Input is an optional, arbitrary Kubernetes resource (i.e. 
a resource\\nwith an apiVersion and kind) that will be passed to the Composition\\nFunction as the 'input' of its RunFunctionRequest.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='input', type=d.T.object)]), + withInputMixin(input): { input+: input }, + '#withStep':: d.fn(help='"Step name. Must be unique within its Pipeline."', args=[d.arg(name='step', type=d.T.string)]), + withStep(step): { step: step }, + }, + '#publishConnectionDetailsWithStoreConfigRef':: d.obj(help='"PublishConnectionDetailsWithStoreConfig specifies the secret store config\\nwith which the connection details of composite resources dynamically\\nprovisioned using this composition will be published.\\n\\n\\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\\nunless the relevant Crossplane feature flag is enabled, and may be\\nchanged or removed without notice."'), + publishConnectionDetailsWithStoreConfigRef: { + '#withName':: d.fn(help='"Name of the referenced StoreConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { publishConnectionDetailsWithStoreConfigRef+: { name: name } } }, + }, + '#resources':: d.obj(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource referring to this composition is created.\\n\\n\\nResources are only used by the \\"Resources\\" mode of Composition. They are\\nignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."'), + resources: { + '#connectionDetails':: d.obj(help='"ConnectionDetails lists the propagation secret keys from this target\\nresource to the composition instance connection secret."'), + connectionDetails: { + '#withFromConnectionSecretKey':: d.fn(help="\"FromConnectionSecretKey is the key that will be used to fetch the value\\nfrom the composed resource's connection secret.\"", args=[d.arg(name='fromConnectionSecretKey', type=d.T.string)]), + withFromConnectionSecretKey(fromConnectionSecretKey): { fromConnectionSecretKey: fromConnectionSecretKey }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the composed resource whose\\nvalue to be used as input. Name must be specified if the type is\\nFromFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withName':: d.fn(help="\"Name of the connection secret key that will be propagated to the\\nconnection secret of the composition instance. Leave empty if you'd like\\nto use the same key name.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withType':: d.fn(help='"Type sets the connection detail fetching behaviour to be used. Each\\nconnection detail type may require its own fields to be set on the\\nConnectionDetail object. If the type is omitted Crossplane will attempt\\nto infer it based on which other fields were specified. If multiple\\nfields are specified the order of precedence is:\\n1. FromValue\\n2. FromConnectionSecretKey\\n3. FromFieldPath"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withValue':: d.fn(help='"Value that will be propagated to the connection secret of the composite\\nresource. 
May be set to inject a fixed, non-sensitive connection secret\\nvalue, for example a well-known port."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#patches':: d.obj(help='"Patches will be applied as overlay to the base resource."'), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite,\\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#mergeOptions':: d.obj(help='"MergeOptions Specifies merge options on a field path."'), + mergeOptions: { + '#withAppendSlice':: d.fn(help='"Specifies that already existing elements in a merged slice should be preserved"', args=[d.arg(name='appendSlice', type=d.T.boolean)]), + withAppendSlice(appendSlice): { policy+: { mergeOptions+: { appendSlice: appendSlice } } }, + '#withKeepMapValues':: d.fn(help='"Specifies that already existing values in a merged map should be preserved"', args=[d.arg(name='keepMapValues', type=d.T.boolean)]), + withKeepMapValues(keepMapValues): { policy+: { mergeOptions+: { keepMapValues: keepMapValues } } }, + }, + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\\nthe specified path does not exist.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. 
The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join defines parameters to join a slice of values to a string."'), + join: { + '#withSeparator':: d.fn(help='"Separator defines the character that should separate the values from each\\nother in the joined string."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON.\\n`ToAdler32` generate a addler32 hash based on the input string."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. 
See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath,\\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withPatchSetName':: d.fn(help='"PatchSetName to include patches from. Required when type is PatchSet."', args=[d.arg(name='patchSetName', type=d.T.string)]), + withPatchSetName(patchSetName): { patchSetName: patchSetName }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#readinessChecks':: d.obj(help='"ReadinessChecks allows users to define custom readiness checks. All checks\\nhave to return true in order for resource to be considered ready. 
The\\ndefault readiness check is to have the \\"Ready\\" condition to be \\"True\\"."'), + readinessChecks: { + '#matchCondition':: d.obj(help="\"MatchCondition specifies the condition you'd like to match if you're using \\\"MatchCondition\\\" type.\""), + matchCondition: { + '#withType':: d.fn(help="\"Type indicates the type of condition you'd like to use.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { matchCondition+: { type: type } }, + }, + '#withFieldPath':: d.fn(help='"FieldPath shows the path of the field whose value will be used."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldPath: fieldPath }, + '#withMatchInteger':: d.fn(help="\"MatchInt is the value you'd like to match if you're using \\\"MatchInt\\\" type.\"", args=[d.arg(name='matchInteger', type=d.T.integer)]), + withMatchInteger(matchInteger): { matchInteger: matchInteger }, + '#withMatchString':: d.fn(help="\"MatchString is the value you'd like to match if you're using \\\"MatchString\\\" type.\"", args=[d.arg(name='matchString', type=d.T.string)]), + withMatchString(matchString): { matchString: matchString }, + '#withType':: d.fn(help="\"Type indicates the type of probe you'd like to use.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withBase':: d.fn(help='"Base is the target resource that the patches will be applied on."', args=[d.arg(name='base', type=d.T.object)]), + withBase(base): { base: base }, + '#withBaseMixin':: d.fn(help='"Base is the target resource that the patches will be applied on."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='base', type=d.T.object)]), + withBaseMixin(base): { base+: base }, + '#withConnectionDetails':: d.fn(help='"ConnectionDetails lists the propagation secret keys from this target\\nresource to the composition instance connection secret."', args=[d.arg(name='connectionDetails', type=d.T.array)]), + withConnectionDetails(connectionDetails): { connectionDetails: if std.isArray(v=connectionDetails) then connectionDetails else [connectionDetails] }, + '#withConnectionDetailsMixin':: d.fn(help='"ConnectionDetails lists the propagation secret keys from this target\\nresource to the composition instance connection secret."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='connectionDetails', type=d.T.array)]), + withConnectionDetailsMixin(connectionDetails): { connectionDetails+: if std.isArray(v=connectionDetails) then connectionDetails else [connectionDetails] }, + '#withName':: d.fn(help="\"A Name uniquely identifies this entry within its Composition's resources\\narray. Names are optional but *strongly* recommended. When all entries in\\nthe resources array are named entries may added, deleted, and reordered\\nas long as their names do not change. 
When entries are not named the\\nlength and order of the resources array should be treated as immutable.\\nEither all or no entries must be named.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPatches':: d.fn(help='"Patches will be applied as overlay to the base resource."', args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { patches: if std.isArray(v=patches) then patches else [patches] }, + '#withPatchesMixin':: d.fn(help='"Patches will be applied as overlay to the base resource."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { patches+: if std.isArray(v=patches) then patches else [patches] }, + '#withReadinessChecks':: d.fn(help='"ReadinessChecks allows users to define custom readiness checks. All checks\\nhave to return true in order for resource to be considered ready. The\\ndefault readiness check is to have the \\"Ready\\" condition to be \\"True\\"."', args=[d.arg(name='readinessChecks', type=d.T.array)]), + withReadinessChecks(readinessChecks): { readinessChecks: if std.isArray(v=readinessChecks) then readinessChecks else [readinessChecks] }, + '#withReadinessChecksMixin':: d.fn(help='"ReadinessChecks allows users to define custom readiness checks. All checks\\nhave to return true in order for resource to be considered ready. The\\ndefault readiness check is to have the \\"Ready\\" condition to be \\"True\\"."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessChecks', type=d.T.array)]), + withReadinessChecksMixin(readinessChecks): { readinessChecks+: if std.isArray(v=readinessChecks) then readinessChecks else [readinessChecks] }, + }, + '#withMode':: d.fn(help="\"Mode controls what type or \\\"mode\\\" of Composition will be used.\\n\\n\\n\\\"Pipeline\\\" indicates that a Composition specifies a pipeline of\\nComposition Functions, each of which is responsible for producing\\ncomposed resources that Crossplane should create or update.\\n\\n\\n\\\"Resources\\\" indicates that a Composition uses what is commonly referred\\nto as \\\"Patch \u0026 Transform\\\" or P\u0026T composition. This mode of Composition\\nuses an array of resources, each a template for a composed resource.\\n\\n\\nAll Compositions should use Pipeline mode. Resources mode is deprecated.\\nResources mode won't be removed in Crossplane 1.x, and will remain the\\ndefault to avoid breaking legacy Compositions. However, it's no longer\\naccepting new features, and only accepting security related bug fixes.\"", args=[d.arg(name='mode', type=d.T.string)]), + withMode(mode): { spec+: { mode: mode } }, + '#withPatchSets':: d.fn(help='"PatchSets define a named set of patches that may be included by any\\nresource in this Composition. PatchSets cannot themselves refer to other\\nPatchSets.\\n\\n\\nPatchSets are only used by the \\"Resources\\" mode of Composition. They\\nare ignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."', args=[d.arg(name='patchSets', type=d.T.array)]), + withPatchSets(patchSets): { spec+: { patchSets: if std.isArray(v=patchSets) then patchSets else [patchSets] } }, + '#withPatchSetsMixin':: d.fn(help='"PatchSets define a named set of patches that may be included by any\\nresource in this Composition. PatchSets cannot themselves refer to other\\nPatchSets.\\n\\n\\nPatchSets are only used by the \\"Resources\\" mode of Composition. 
They\\nare ignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patchSets', type=d.T.array)]), + withPatchSetsMixin(patchSets): { spec+: { patchSets+: if std.isArray(v=patchSets) then patchSets else [patchSets] } }, + '#withPipeline':: d.fn(help='"Pipeline is a list of composition function steps that will be used when a\\ncomposite resource referring to this composition is created. One of\\nresources and pipeline must be specified - you cannot specify both.\\n\\n\\nThe Pipeline is only used by the \\"Pipeline\\" mode of Composition. It is\\nignored by other modes."', args=[d.arg(name='pipeline', type=d.T.array)]), + withPipeline(pipeline): { spec+: { pipeline: if std.isArray(v=pipeline) then pipeline else [pipeline] } }, + '#withPipelineMixin':: d.fn(help='"Pipeline is a list of composition function steps that will be used when a\\ncomposite resource referring to this composition is created. One of\\nresources and pipeline must be specified - you cannot specify both.\\n\\n\\nThe Pipeline is only used by the \\"Pipeline\\" mode of Composition. It is\\nignored by other modes."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='pipeline', type=d.T.array)]), + withPipelineMixin(pipeline): { spec+: { pipeline+: if std.isArray(v=pipeline) then pipeline else [pipeline] } }, + '#withResources':: d.fn(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource referring to this composition is created.\\n\\n\\nResources are only used by the \\"Resources\\" mode of Composition. They are\\nignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."', args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { spec+: { resources: if std.isArray(v=resources) then resources else [resources] } }, + '#withResourcesMixin':: d.fn(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource referring to this composition is created.\\n\\n\\nResources are only used by the \\"Resources\\" mode of Composition. They are\\nignored by other modes.\\n\\n\\nDeprecated: Use Composition Functions instead."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { spec+: { resources+: if std.isArray(v=resources) then resources else [resources] } }, + '#withRevision':: d.fn(help='"Revision number. Newer revisions have larger numbers."', args=[d.arg(name='revision', type=d.T.integer)]), + withRevision(revision): { spec+: { revision: revision } }, + '#withWriteConnectionSecretsToNamespace':: d.fn(help='"WriteConnectionSecretsToNamespace specifies the namespace in which the\\nconnection secrets of composite resource dynamically provisioned using\\nthis composition will be created.\\nThis field is planned to be replaced in a future release in favor of\\nPublishConnectionDetailsWithStoreConfigRef. 
Currently, both could be\\nset independently and connection details would be published to both\\nwithout affecting each other as long as related fields at MR level\\nspecified."', args=[d.arg(name='writeConnectionSecretsToNamespace', type=d.T.string)]), + withWriteConnectionSecretsToNamespace(writeConnectionSecretsToNamespace): { spec+: { writeConnectionSecretsToNamespace: writeConnectionSecretsToNamespace } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/apiextensions/v1beta1/main.libsonnet b/crossplane/1.17/_gen/apiextensions/v1beta1/main.libsonnet new file mode 100644 index 0000000..8098f8d --- /dev/null +++ b/crossplane/1.17/_gen/apiextensions/v1beta1/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta1', url='', help=''), + compositionRevision: (import 'compositionRevision.libsonnet'), +} diff --git a/crossplane/1.17/_gen/meta/main.libsonnet b/crossplane/1.17/_gen/meta/main.libsonnet new file mode 100644 index 0000000..50b93fb --- /dev/null +++ b/crossplane/1.17/_gen/meta/main.libsonnet @@ -0,0 +1,7 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='meta', url='', help=''), + v1: (import 'v1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), + v1beta1: (import 'v1beta1/main.libsonnet'), +} diff --git a/crossplane/1.17/_gen/meta/v1/configuration.libsonnet b/crossplane/1.17/_gen/meta/v1/configuration.libsonnet new file mode 100644 index 0000000..3f49d9b --- /dev/null +++ b/crossplane/1.17/_gen/meta/v1/configuration.libsonnet @@ -0,0 +1,75 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='configuration', url='', help='"A Configuration is the description of a Crossplane Configuration package."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Configuration', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'meta.pkg.crossplane.io/v1', + kind: 'Configuration', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ConfigurationSpec specifies the configuration of a Configuration."'), + spec: { + '#crossplane':: d.obj(help='"Semantic version constraints of Crossplane that package is compatible with."'), + crossplane: { + '#withVersion':: d.fn(help='"Semantic version constraints of Crossplane that package is compatible with."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { spec+: { crossplane+: { version: version } } }, + }, + '#dependsOn':: d.obj(help='"Dependencies on other packages."'), + dependsOn: { + '#withConfiguration':: d.fn(help='"Configuration is the name of a Configuration package image."', args=[d.arg(name='configuration', type=d.T.string)]), + withConfiguration(configuration): { configuration: configuration }, + '#withFunction':: d.fn(help='"Function is the name of a Function package image."', args=[d.arg(name='Function', type=d.T.string)]), + withFunction(Function): { 'function': Function }, + '#withProvider':: d.fn(help='"Provider is the name of a Provider package image."', args=[d.arg(name='provider', type=d.T.string)]), + withProvider(provider): { provider: provider }, + '#withVersion':: d.fn(help='"Version is the semantic version constraints of the dependency image."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { version: version }, + }, + '#withDependsOn':: d.fn(help='"Dependencies on other packages."', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOn(dependsOn): { spec+: { dependsOn: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + '#withDependsOnMixin':: d.fn(help='"Dependencies on other packages."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOnMixin(dependsOn): { spec+: { dependsOn+: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/meta/v1/main.libsonnet b/crossplane/1.17/_gen/meta/v1/main.libsonnet new file mode 100644 index 0000000..78edc06 --- /dev/null +++ b/crossplane/1.17/_gen/meta/v1/main.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1', url='', help=''), + configuration: (import 'configuration.libsonnet'), + provider: (import 'provider.libsonnet'), +} diff --git a/crossplane/1.17/_gen/meta/v1/provider.libsonnet b/crossplane/1.17/_gen/meta/v1/provider.libsonnet new file mode 100644 index 0000000..7a5f6c8 --- /dev/null +++ b/crossplane/1.17/_gen/meta/v1/provider.libsonnet @@ -0,0 +1,107 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='provider', url='', help='"A Provider is the description of a Crossplane Provider package."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Provider', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'meta.pkg.crossplane.io/v1', + kind: 'Provider', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ProviderSpec specifies the configuration of a Provider."'), + spec: { + '#controller':: d.obj(help="\"Configuration for the packaged Provider's controller.\""), + controller: { + '#permissionRequests':: d.obj(help="\"PermissionRequests for RBAC rules required for this provider's controller\\nto function. The RBAC manager is responsible for assessing the requested\\npermissions.\""), + permissionRequests: { + '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of\\nthe enumerated resources in any API group will be allowed. \\"\\" represents the core API group and \\"*\\" represents all API groups."', args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of\\nthe enumerated resources in any API group will be allowed. \\"\\" represents the core API group and \\"*\\" represents all API groups."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path\\nSince non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.\\nRules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), + withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, + '#withNonResourceURLsMixin':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path\\nSince non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.\\nRules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), + withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, + '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources.\"", args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, + '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, + '#withVerbs':: d.fn(help="\"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.\"", args=[d.arg(name='verbs', type=d.T.array)]), + withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, + '#withVerbsMixin':: d.fn(help="\"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='verbs', type=d.T.array)]), + withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, + }, + '#withImage':: d.fn(help='"Image is the packaged Provider controller image."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { spec+: { controller+: { image: image } } }, + '#withPermissionRequests':: d.fn(help="\"PermissionRequests for RBAC rules required for this provider's controller\\nto function. The RBAC manager is responsible for assessing the requested\\npermissions.\"", args=[d.arg(name='permissionRequests', type=d.T.array)]), + withPermissionRequests(permissionRequests): { spec+: { controller+: { permissionRequests: if std.isArray(v=permissionRequests) then permissionRequests else [permissionRequests] } } }, + '#withPermissionRequestsMixin':: d.fn(help="\"PermissionRequests for RBAC rules required for this provider's controller\\nto function. 
The RBAC manager is responsible for assessing the requested\\npermissions.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='permissionRequests', type=d.T.array)]), + withPermissionRequestsMixin(permissionRequests): { spec+: { controller+: { permissionRequests+: if std.isArray(v=permissionRequests) then permissionRequests else [permissionRequests] } } }, + }, + '#crossplane':: d.obj(help='"Semantic version constraints of Crossplane that package is compatible with."'), + crossplane: { + '#withVersion':: d.fn(help='"Semantic version constraints of Crossplane that package is compatible with."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { spec+: { crossplane+: { version: version } } }, + }, + '#dependsOn':: d.obj(help='"Dependencies on other packages."'), + dependsOn: { + '#withConfiguration':: d.fn(help='"Configuration is the name of a Configuration package image."', args=[d.arg(name='configuration', type=d.T.string)]), + withConfiguration(configuration): { configuration: configuration }, + '#withFunction':: d.fn(help='"Function is the name of a Function package image."', args=[d.arg(name='Function', type=d.T.string)]), + withFunction(Function): { 'function': Function }, + '#withProvider':: d.fn(help='"Provider is the name of a Provider package image."', args=[d.arg(name='provider', type=d.T.string)]), + withProvider(provider): { provider: provider }, + '#withVersion':: d.fn(help='"Version is the semantic version constraints of the dependency image."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { version: version }, + }, + '#withDependsOn':: d.fn(help='"Dependencies on other packages."', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOn(dependsOn): { spec+: { dependsOn: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + '#withDependsOnMixin':: d.fn(help='"Dependencies on other packages."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOnMixin(dependsOn): { spec+: { dependsOn+: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/meta/v1alpha1/configuration.libsonnet b/crossplane/1.17/_gen/meta/v1alpha1/configuration.libsonnet new file mode 100644 index 0000000..13519a8 --- /dev/null +++ b/crossplane/1.17/_gen/meta/v1alpha1/configuration.libsonnet @@ -0,0 +1,75 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='configuration', url='', help='"A Configuration is the description of a Crossplane Configuration package."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Configuration', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'meta.pkg.crossplane.io/v1alpha1', + kind: 'Configuration', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ConfigurationSpec specifies the configuration of a Configuration."'), + spec: { + '#crossplane':: d.obj(help='"Semantic version constraints of Crossplane that package is compatible with."'), + crossplane: { + '#withVersion':: d.fn(help='"Semantic version constraints of Crossplane that package is compatible with."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { spec+: { crossplane+: { version: version } } }, + }, + '#dependsOn':: d.obj(help='"Dependencies on other packages."'), + dependsOn: { + '#withConfiguration':: d.fn(help='"Configuration is the name of a Configuration package image."', args=[d.arg(name='configuration', type=d.T.string)]), + withConfiguration(configuration): { configuration: configuration }, + '#withFunction':: d.fn(help='"Function is the name of a Function package image."', args=[d.arg(name='Function', type=d.T.string)]), + withFunction(Function): { 'function': Function }, + '#withProvider':: d.fn(help='"Provider is the name of a Provider package image."', args=[d.arg(name='provider', type=d.T.string)]), + withProvider(provider): { provider: provider }, + '#withVersion':: d.fn(help='"Version is the semantic version constraints of the dependency image."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { version: version }, + }, + '#withDependsOn':: d.fn(help='"Dependencies on other packages."', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOn(dependsOn): { spec+: { dependsOn: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + '#withDependsOnMixin':: d.fn(help='"Dependencies on other packages."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOnMixin(dependsOn): { spec+: { dependsOn+: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/meta/v1alpha1/main.libsonnet b/crossplane/1.17/_gen/meta/v1alpha1/main.libsonnet new file mode 100644 index 0000000..94820d0 --- /dev/null +++ b/crossplane/1.17/_gen/meta/v1alpha1/main.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + configuration: (import 'configuration.libsonnet'), + provider: (import 'provider.libsonnet'), +} diff --git a/crossplane/1.17/_gen/meta/v1alpha1/provider.libsonnet b/crossplane/1.17/_gen/meta/v1alpha1/provider.libsonnet new file mode 100644 index 0000000..61d5ffc --- /dev/null +++ b/crossplane/1.17/_gen/meta/v1alpha1/provider.libsonnet @@ -0,0 +1,107 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='provider', url='', help='"A Provider is the description of a Crossplane Provider package."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Provider', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'meta.pkg.crossplane.io/v1alpha1', + kind: 'Provider', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ProviderSpec specifies the configuration of a Provider."'), + spec: { + '#controller':: d.obj(help="\"Configuration for the packaged Provider's controller.\""), + controller: { + '#permissionRequests':: d.obj(help="\"PermissionRequests for RBAC rules required for this provider's controller\\nto function. The RBAC manager is responsible for assessing the requested\\npermissions.\""), + permissionRequests: { + '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of\\nthe enumerated resources in any API group will be allowed. \\"\\" represents the core API group and \\"*\\" represents all API groups."', args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of\\nthe enumerated resources in any API group will be allowed. \\"\\" represents the core API group and \\"*\\" represents all API groups."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path\\nSince non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.\\nRules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), + withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, + '#withNonResourceURLsMixin':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path\\nSince non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.\\nRules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), + withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, + '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources.\"", args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, + '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, + '#withVerbs':: d.fn(help="\"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.\"", args=[d.arg(name='verbs', type=d.T.array)]), + withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, + '#withVerbsMixin':: d.fn(help="\"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='verbs', type=d.T.array)]), + withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, + }, + '#withImage':: d.fn(help='"Image is the packaged Provider controller image."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { spec+: { controller+: { image: image } } }, + '#withPermissionRequests':: d.fn(help="\"PermissionRequests for RBAC rules required for this provider's controller\\nto function. The RBAC manager is responsible for assessing the requested\\npermissions.\"", args=[d.arg(name='permissionRequests', type=d.T.array)]), + withPermissionRequests(permissionRequests): { spec+: { controller+: { permissionRequests: if std.isArray(v=permissionRequests) then permissionRequests else [permissionRequests] } } }, + '#withPermissionRequestsMixin':: d.fn(help="\"PermissionRequests for RBAC rules required for this provider's controller\\nto function. 
The RBAC manager is responsible for assessing the requested\\npermissions.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='permissionRequests', type=d.T.array)]), + withPermissionRequestsMixin(permissionRequests): { spec+: { controller+: { permissionRequests+: if std.isArray(v=permissionRequests) then permissionRequests else [permissionRequests] } } }, + }, + '#crossplane':: d.obj(help='"Semantic version constraints of Crossplane that package is compatible with."'), + crossplane: { + '#withVersion':: d.fn(help='"Semantic version constraints of Crossplane that package is compatible with."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { spec+: { crossplane+: { version: version } } }, + }, + '#dependsOn':: d.obj(help='"Dependencies on other packages."'), + dependsOn: { + '#withConfiguration':: d.fn(help='"Configuration is the name of a Configuration package image."', args=[d.arg(name='configuration', type=d.T.string)]), + withConfiguration(configuration): { configuration: configuration }, + '#withFunction':: d.fn(help='"Function is the name of a Function package image."', args=[d.arg(name='Function', type=d.T.string)]), + withFunction(Function): { 'function': Function }, + '#withProvider':: d.fn(help='"Provider is the name of a Provider package image."', args=[d.arg(name='provider', type=d.T.string)]), + withProvider(provider): { provider: provider }, + '#withVersion':: d.fn(help='"Version is the semantic version constraints of the dependency image."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { version: version }, + }, + '#withDependsOn':: d.fn(help='"Dependencies on other packages."', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOn(dependsOn): { spec+: { dependsOn: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + '#withDependsOnMixin':: d.fn(help='"Dependencies on other packages."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOnMixin(dependsOn): { spec+: { dependsOn+: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/meta/v1beta1/function.libsonnet b/crossplane/1.17/_gen/meta/v1beta1/function.libsonnet new file mode 100644 index 0000000..f7bd1e5 --- /dev/null +++ b/crossplane/1.17/_gen/meta/v1beta1/function.libsonnet @@ -0,0 +1,77 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='function', url='', help='"A Function is the description of a Crossplane Function package."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Function', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'meta.pkg.crossplane.io/v1beta1', + kind: 'Function', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"FunctionSpec specifies the configuration of a Function."'), + spec: { + '#crossplane':: d.obj(help='"Semantic version constraints of Crossplane that package is compatible with."'), + crossplane: { + '#withVersion':: d.fn(help='"Semantic version constraints of Crossplane that package is compatible with."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { spec+: { crossplane+: { version: version } } }, + }, + '#dependsOn':: d.obj(help='"Dependencies on other packages."'), + dependsOn: { + '#withConfiguration':: d.fn(help='"Configuration is the name of a Configuration package image."', args=[d.arg(name='configuration', type=d.T.string)]), + withConfiguration(configuration): { configuration: configuration }, + '#withFunction':: d.fn(help='"Function is the name of a Function package image."', args=[d.arg(name='Function', type=d.T.string)]), + withFunction(Function): { 'function': Function }, + '#withProvider':: d.fn(help='"Provider is the name of a Provider package image."', args=[d.arg(name='provider', type=d.T.string)]), + withProvider(provider): { provider: provider }, + '#withVersion':: d.fn(help='"Version is the semantic version constraints of the dependency image."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { version: version }, + }, + '#withDependsOn':: d.fn(help='"Dependencies on other packages."', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOn(dependsOn): { spec+: { dependsOn: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + '#withDependsOnMixin':: d.fn(help='"Dependencies on other packages."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='dependsOn', type=d.T.array)]), + withDependsOnMixin(dependsOn): { spec+: { dependsOn+: if std.isArray(v=dependsOn) then dependsOn else [dependsOn] } }, + '#withImage':: d.fn(help='"Image is the packaged Function image."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { spec+: { image: image } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/meta/v1beta1/main.libsonnet b/crossplane/1.17/_gen/meta/v1beta1/main.libsonnet new file mode 100644 index 0000000..b9a7760 --- /dev/null +++ b/crossplane/1.17/_gen/meta/v1beta1/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta1', url='', help=''), + 'function': (import 'function.libsonnet'), +} diff --git a/crossplane/1.17/_gen/pkg/main.libsonnet b/crossplane/1.17/_gen/pkg/main.libsonnet new file mode 100644 index 0000000..ca7c1fa --- /dev/null +++ b/crossplane/1.17/_gen/pkg/main.libsonnet @@ -0,0 +1,7 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='pkg', url='', help=''), + v1: (import 'v1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), + v1beta1: (import 'v1beta1/main.libsonnet'), +} diff --git a/crossplane/1.17/_gen/pkg/v1/configuration.libsonnet b/crossplane/1.17/_gen/pkg/v1/configuration.libsonnet new file mode 100644 index 0000000..421e89d --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1/configuration.libsonnet @@ -0,0 
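Aside (editor's note, not part of the generated diff): the meta.pkg.crossplane.io/v1beta1 Function library that closes just above is the package-metadata variant used to author a crossplane.yaml manifest. Below is a minimal usage sketch, assuming the library is vendored so that the import path and its doc-util dependency resolve on the jsonnet search path; the name, package image, provider, and version constraints are hypothetical placeholders, not values taken from this diff.

  // Build a meta.pkg.crossplane.io/v1beta1 Function manifest with the generated helpers above.
  // The import path is illustrative; adjust it to your vendoring layout.
  local fn = (import 'crossplane/1.17/_gen/meta/v1beta1/main.libsonnet')['function'];

  fn.new('function-example')                                              // hypothetical name
  + fn.spec.withImage('xpkg.example.org/function-example:v0.1.0')         // hypothetical package image
  + fn.spec.crossplane.withVersion('>=v1.17.0')                           // illustrative Crossplane constraint
  + fn.spec.withDependsOn([
      fn.spec.dependsOn.withProvider('xpkg.example.org/provider-example') // hypothetical dependency
      + fn.spec.dependsOn.withVersion('>=v0.1.0'),
    ])

Each with* helper returns a small object fragment, so dependencies are built by adding fragments together and passing the result to spec.withDependsOn as an array.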
+1,82 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='configuration', url='', help='"A Configuration installs an OCI compatible Crossplane package, extending\\nCrossplane with support for new kinds of CompositeResourceDefinitions and\\nCompositions.\\n\\n\\nRead the Crossplane documentation for\\n[more information about Configuration packages](https://docs.crossplane.io/latest/concepts/packages)."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. 
Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Configuration', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1', + kind: 'Configuration', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"ConfigurationSpec specifies details about a request to install a\\nconfiguration to Crossplane."'), + spec: { + '#packagePullSecrets':: d.obj(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."'), + packagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#withCommonLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabels(commonLabels): { spec+: { commonLabels: commonLabels } }, + '#withCommonLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. 
May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabelsMixin(commonLabels): { spec+: { commonLabels+: commonLabels } }, + '#withIgnoreCrossplaneConstraints':: d.fn(help='"IgnoreCrossplaneConstraints indicates to the package manager whether to\\nhonor Crossplane version constrains specified by the package.\\nDefault is false."', args=[d.arg(name='ignoreCrossplaneConstraints', type=d.T.boolean)]), + withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints): { spec+: { ignoreCrossplaneConstraints: ignoreCrossplaneConstraints } }, + '#withPackage':: d.fn(help='"Package is the name of the package that is being requested."', args=[d.arg(name='package', type=d.T.string)]), + withPackage(package): { spec+: { package: package } }, + '#withPackagePullPolicy':: d.fn(help='"PackagePullPolicy defines the pull policy for the package.\\nDefault is IfNotPresent."', args=[d.arg(name='packagePullPolicy', type=d.T.string)]), + withPackagePullPolicy(packagePullPolicy): { spec+: { packagePullPolicy: packagePullPolicy } }, + '#withPackagePullSecrets':: d.fn(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."', args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecrets(packagePullSecrets): { spec+: { packagePullSecrets: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withPackagePullSecretsMixin':: d.fn(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecretsMixin(packagePullSecrets): { spec+: { packagePullSecrets+: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withRevisionActivationPolicy':: d.fn(help='"RevisionActivationPolicy specifies how the package controller should\\nupdate from one revision to the next. Options are Automatic or Manual.\\nDefault is Automatic."', args=[d.arg(name='revisionActivationPolicy', type=d.T.string)]), + withRevisionActivationPolicy(revisionActivationPolicy): { spec+: { revisionActivationPolicy: revisionActivationPolicy } }, + '#withRevisionHistoryLimit':: d.fn(help='"RevisionHistoryLimit dictates how the package controller cleans up old\\ninactive package revisions.\\nDefaults to 1. Can be disabled by explicitly setting to 0."', args=[d.arg(name='revisionHistoryLimit', type=d.T.integer)]), + withRevisionHistoryLimit(revisionHistoryLimit): { spec+: { revisionHistoryLimit: revisionHistoryLimit } }, + '#withSkipDependencyResolution':: d.fn(help='"SkipDependencyResolution indicates to the package manager whether to skip\\nresolving dependencies for a package. 
Setting this value to true may have\\nunintended consequences.\\nDefault is false."', args=[d.arg(name='skipDependencyResolution', type=d.T.boolean)]), + withSkipDependencyResolution(skipDependencyResolution): { spec+: { skipDependencyResolution: skipDependencyResolution } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1/configurationRevision.libsonnet b/crossplane/1.17/_gen/pkg/v1/configurationRevision.libsonnet new file mode 100644 index 0000000..b1155fa --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1/configurationRevision.libsonnet @@ -0,0 +1,82 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='configurationRevision', url='', help="\"A ConfigurationRevision represents a revision of a Configuration. Crossplane\\ncreates new revisions when there are changes to a Configuration.\\n\\n\\nCrossplane creates and manages ConfigurationRevision. Don't directly edit\\nConfigurationRevisions.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
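Aside (editor's note, not part of the generated diff): the pkg.crossplane.io/v1 Configuration library that closes just above composes in the usual k8s-libsonnet style. A minimal sketch follows, under the same vendoring assumptions as before; the package image and secret name are hypothetical. Note that new(name) also sets the tanka.dev/namespaced: 'false' annotation, reflecting that packages are cluster-scoped.

  // Install a Configuration package, optionally pulling from a private registry.
  // The import path is illustrative; adjust it to your vendoring layout.
  local configuration = import 'crossplane/1.17/_gen/pkg/v1/configuration.libsonnet';

  configuration.new('platform-ref-example')                                          // hypothetical name
  + configuration.spec.withPackage('xpkg.example.org/platform-ref-example:v0.1.0')   // hypothetical package image
  + configuration.spec.withPackagePullPolicy('IfNotPresent')
  + configuration.spec.withRevisionHistoryLimit(3)
  + configuration.spec.withPackagePullSecrets([
      configuration.spec.packagePullSecrets.withName('registry-credentials'),        // hypothetical secret
    ])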
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ConfigurationRevision', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1', + kind: 'ConfigurationRevision', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"PackageRevisionSpec specifies the desired state of a PackageRevision."'), + spec: { + '#packagePullSecrets':: d.obj(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. They are also applied to\\nany images pulled for the package, such as a provider's controller image.\""), + packagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#withCommonLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabels(commonLabels): { spec+: { commonLabels: commonLabels } }, + '#withCommonLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. 
May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabelsMixin(commonLabels): { spec+: { commonLabels+: commonLabels } }, + '#withDesiredState':: d.fn(help='"DesiredState of the PackageRevision. Can be either Active or Inactive."', args=[d.arg(name='desiredState', type=d.T.string)]), + withDesiredState(desiredState): { spec+: { desiredState: desiredState } }, + '#withIgnoreCrossplaneConstraints':: d.fn(help='"IgnoreCrossplaneConstraints indicates to the package manager whether to\\nhonor Crossplane version constrains specified by the package.\\nDefault is false."', args=[d.arg(name='ignoreCrossplaneConstraints', type=d.T.boolean)]), + withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints): { spec+: { ignoreCrossplaneConstraints: ignoreCrossplaneConstraints } }, + '#withImage':: d.fn(help='"Package image used by install Pod to extract package contents."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { spec+: { image: image } }, + '#withPackagePullPolicy':: d.fn(help="\"PackagePullPolicy defines the pull policy for the package. It is also\\napplied to any images pulled for the package, such as a provider's\\ncontroller image.\\nDefault is IfNotPresent.\"", args=[d.arg(name='packagePullPolicy', type=d.T.string)]), + withPackagePullPolicy(packagePullPolicy): { spec+: { packagePullPolicy: packagePullPolicy } }, + '#withPackagePullSecrets':: d.fn(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. They are also applied to\\nany images pulled for the package, such as a provider's controller image.\"", args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecrets(packagePullSecrets): { spec+: { packagePullSecrets: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withPackagePullSecretsMixin':: d.fn(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. They are also applied to\\nany images pulled for the package, such as a provider's controller image.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecretsMixin(packagePullSecrets): { spec+: { packagePullSecrets+: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withRevision':: d.fn(help="\"Revision number. Indicates when the revision will be garbage collected\\nbased on the parent's RevisionHistoryLimit.\"", args=[d.arg(name='revision', type=d.T.integer)]), + withRevision(revision): { spec+: { revision: revision } }, + '#withSkipDependencyResolution':: d.fn(help='"SkipDependencyResolution indicates to the package manager whether to skip\\nresolving dependencies for a package. 
Setting this value to true may have\\nunintended consequences.\\nDefault is false."', args=[d.arg(name='skipDependencyResolution', type=d.T.boolean)]), + withSkipDependencyResolution(skipDependencyResolution): { spec+: { skipDependencyResolution: skipDependencyResolution } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1/function.libsonnet b/crossplane/1.17/_gen/pkg/v1/function.libsonnet new file mode 100644 index 0000000..5b180dd --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1/function.libsonnet @@ -0,0 +1,96 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='function', url='', help='"A Function installs an OCI compatible Crossplane package, extending\\nCrossplane with support for a new kind of composition function.\\n\\n\\nRead the Crossplane documentation for\\n[more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions)."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Function', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1', + kind: 'Function', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"FunctionSpec specifies the configuration of a Function."'), + spec: { + '#controllerConfigRef':: d.obj(help='"ControllerConfigRef references a ControllerConfig resource that will be\\nused to configure the packaged controller Deployment.\\nDeprecated: Use RuntimeConfigReference instead."'), + controllerConfigRef: { + '#withName':: d.fn(help='"Name of the ControllerConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { controllerConfigRef+: { name: name } } }, + }, + '#packagePullSecrets':: d.obj(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."'), + packagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#runtimeConfigRef':: d.obj(help='"RuntimeConfigRef references a RuntimeConfig resource that will be used\\nto configure the package runtime."'), + runtimeConfigRef: { + '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { runtimeConfigRef+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the referent."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { runtimeConfigRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name of the RuntimeConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { runtimeConfigRef+: { name: name } } }, + }, + '#withCommonLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabels(commonLabels): { spec+: { commonLabels: commonLabels } }, + '#withCommonLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabelsMixin(commonLabels): { spec+: { commonLabels+: commonLabels } }, + '#withIgnoreCrossplaneConstraints':: d.fn(help='"IgnoreCrossplaneConstraints indicates to the package manager whether to\\nhonor Crossplane version constrains specified by the package.\\nDefault is false."', args=[d.arg(name='ignoreCrossplaneConstraints', type=d.T.boolean)]), + withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints): { spec+: { ignoreCrossplaneConstraints: ignoreCrossplaneConstraints } }, + '#withPackage':: d.fn(help='"Package is the name of the package that is being requested."', args=[d.arg(name='package', type=d.T.string)]), + withPackage(package): { spec+: { package: package } }, + '#withPackagePullPolicy':: d.fn(help='"PackagePullPolicy defines the pull policy for the package.\\nDefault is IfNotPresent."', args=[d.arg(name='packagePullPolicy', type=d.T.string)]), + withPackagePullPolicy(packagePullPolicy): { spec+: { packagePullPolicy: packagePullPolicy } }, + '#withPackagePullSecrets':: d.fn(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."', args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecrets(packagePullSecrets): { spec+: { packagePullSecrets: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withPackagePullSecretsMixin':: d.fn(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecretsMixin(packagePullSecrets): { spec+: { packagePullSecrets+: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withRevisionActivationPolicy':: 
d.fn(help='"RevisionActivationPolicy specifies how the package controller should\\nupdate from one revision to the next. Options are Automatic or Manual.\\nDefault is Automatic."', args=[d.arg(name='revisionActivationPolicy', type=d.T.string)]), + withRevisionActivationPolicy(revisionActivationPolicy): { spec+: { revisionActivationPolicy: revisionActivationPolicy } }, + '#withRevisionHistoryLimit':: d.fn(help='"RevisionHistoryLimit dictates how the package controller cleans up old\\ninactive package revisions.\\nDefaults to 1. Can be disabled by explicitly setting to 0."', args=[d.arg(name='revisionHistoryLimit', type=d.T.integer)]), + withRevisionHistoryLimit(revisionHistoryLimit): { spec+: { revisionHistoryLimit: revisionHistoryLimit } }, + '#withSkipDependencyResolution':: d.fn(help='"SkipDependencyResolution indicates to the package manager whether to skip\\nresolving dependencies for a package. Setting this value to true may have\\nunintended consequences.\\nDefault is false."', args=[d.arg(name='skipDependencyResolution', type=d.T.boolean)]), + withSkipDependencyResolution(skipDependencyResolution): { spec+: { skipDependencyResolution: skipDependencyResolution } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1/functionRevision.libsonnet b/crossplane/1.17/_gen/pkg/v1/functionRevision.libsonnet new file mode 100644 index 0000000..f7ed494 --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1/functionRevision.libsonnet @@ -0,0 +1,100 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='functionRevision', url='', help="\"A FunctionRevision represents a revision of a Function. Crossplane\\ncreates new revisions when there are changes to the Function.\\n\\n\\nCrossplane creates and manages FunctionRevisions. Don't directly edit\\nFunctionRevisions.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
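Aside (editor's note, not part of the generated diff): the pkg.crossplane.io/v1 Function library that closes just above installs a composition function package. A minimal sketch under the same vendoring assumptions; the function name, image, and runtime config name are hypothetical. Per the help text above, runtimeConfigRef is the current way to configure the package runtime, with controllerConfigRef deprecated.

  // Install a composition function package and reference a runtime config.
  // The import path is illustrative; adjust it to your vendoring layout.
  local fn = import 'crossplane/1.17/_gen/pkg/v1/function.libsonnet';

  fn.new('function-example')                                          // hypothetical name
  + fn.spec.withPackage('xpkg.example.org/function-example:v0.1.0')   // hypothetical package image
  + fn.spec.withPackagePullPolicy('IfNotPresent')
  + fn.spec.runtimeConfigRef.withName('example-runtime-config')       // hypothetical RuntimeConfig name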
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of FunctionRevision', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1', + kind: 'FunctionRevision', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"FunctionRevisionSpec specifies configuration for a FunctionRevision."'), + spec: { + '#controllerConfigRef':: d.obj(help='"ControllerConfigRef references a ControllerConfig resource that will be\\nused to configure the packaged controller Deployment.\\nDeprecated: Use RuntimeConfigReference instead."'), + controllerConfigRef: { + '#withName':: d.fn(help='"Name of the ControllerConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { controllerConfigRef+: { name: name } } }, + }, + '#packagePullSecrets':: d.obj(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. 
They are also applied to\\nany images pulled for the package, such as a provider's controller image.\""), + packagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#runtimeConfigRef':: d.obj(help='"RuntimeConfigRef references a RuntimeConfig resource that will be used\\nto configure the package runtime."'), + runtimeConfigRef: { + '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { runtimeConfigRef+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the referent."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { runtimeConfigRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name of the RuntimeConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { runtimeConfigRef+: { name: name } } }, + }, + '#withCommonLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabels(commonLabels): { spec+: { commonLabels: commonLabels } }, + '#withCommonLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabelsMixin(commonLabels): { spec+: { commonLabels+: commonLabels } }, + '#withDesiredState':: d.fn(help='"DesiredState of the PackageRevision. Can be either Active or Inactive."', args=[d.arg(name='desiredState', type=d.T.string)]), + withDesiredState(desiredState): { spec+: { desiredState: desiredState } }, + '#withIgnoreCrossplaneConstraints':: d.fn(help='"IgnoreCrossplaneConstraints indicates to the package manager whether to\\nhonor Crossplane version constrains specified by the package.\\nDefault is false."', args=[d.arg(name='ignoreCrossplaneConstraints', type=d.T.boolean)]), + withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints): { spec+: { ignoreCrossplaneConstraints: ignoreCrossplaneConstraints } }, + '#withImage':: d.fn(help='"Package image used by install Pod to extract package contents."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { spec+: { image: image } }, + '#withPackagePullPolicy':: d.fn(help="\"PackagePullPolicy defines the pull policy for the package. It is also\\napplied to any images pulled for the package, such as a provider's\\ncontroller image.\\nDefault is IfNotPresent.\"", args=[d.arg(name='packagePullPolicy', type=d.T.string)]), + withPackagePullPolicy(packagePullPolicy): { spec+: { packagePullPolicy: packagePullPolicy } }, + '#withPackagePullSecrets':: d.fn(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. 
They are also applied to\\nany images pulled for the package, such as a provider's controller image.\"", args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecrets(packagePullSecrets): { spec+: { packagePullSecrets: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withPackagePullSecretsMixin':: d.fn(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. They are also applied to\\nany images pulled for the package, such as a provider's controller image.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecretsMixin(packagePullSecrets): { spec+: { packagePullSecrets+: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withRevision':: d.fn(help="\"Revision number. Indicates when the revision will be garbage collected\\nbased on the parent's RevisionHistoryLimit.\"", args=[d.arg(name='revision', type=d.T.integer)]), + withRevision(revision): { spec+: { revision: revision } }, + '#withSkipDependencyResolution':: d.fn(help='"SkipDependencyResolution indicates to the package manager whether to skip\\nresolving dependencies for a package. Setting this value to true may have\\nunintended consequences.\\nDefault is false."', args=[d.arg(name='skipDependencyResolution', type=d.T.boolean)]), + withSkipDependencyResolution(skipDependencyResolution): { spec+: { skipDependencyResolution: skipDependencyResolution } }, + '#withTlsClientSecretName':: d.fn(help='"TLSClientSecretName is the name of the TLS Secret that stores client\\ncertificates of the Provider."', args=[d.arg(name='tlsClientSecretName', type=d.T.string)]), + withTlsClientSecretName(tlsClientSecretName): { spec+: { tlsClientSecretName: tlsClientSecretName } }, + '#withTlsServerSecretName':: d.fn(help='"TLSServerSecretName is the name of the TLS Secret that stores server\\ncertificates of the Provider."', args=[d.arg(name='tlsServerSecretName', type=d.T.string)]), + withTlsServerSecretName(tlsServerSecretName): { spec+: { tlsServerSecretName: tlsServerSecretName } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1/main.libsonnet b/crossplane/1.17/_gen/pkg/v1/main.libsonnet new file mode 100644 index 0000000..de1b9dd --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1/main.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1', url='', help=''), + configuration: (import 'configuration.libsonnet'), + configurationRevision: (import 'configurationRevision.libsonnet'), + 'function': (import 'function.libsonnet'), + functionRevision: (import 'functionRevision.libsonnet'), + provider: (import 'provider.libsonnet'), + providerRevision: (import 'providerRevision.libsonnet'), +} diff --git a/crossplane/1.17/_gen/pkg/v1/provider.libsonnet b/crossplane/1.17/_gen/pkg/v1/provider.libsonnet new file mode 100644 index 0000000..75b7c45 --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1/provider.libsonnet @@ -0,0 +1,96 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='provider', url='', help='"A Provider installs an OCI compatible Crossplane package, extending\\nCrossplane with support for new kinds of managed resources.\\n\\n\\nRead the Crossplane documentation for\\n[more information about Providers](https://docs.crossplane.io/latest/concepts/providers)."'), + '#metadata':: 
d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Provider', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1', + kind: 'Provider', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"ProviderSpec specifies details about a request to install a provider to\\nCrossplane."'), + spec: { + '#controllerConfigRef':: d.obj(help='"ControllerConfigRef references a ControllerConfig resource that will be\\nused to configure the packaged controller Deployment.\\nDeprecated: Use RuntimeConfigReference instead."'), + controllerConfigRef: { + '#withName':: d.fn(help='"Name of the ControllerConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { controllerConfigRef+: { name: name } } }, + }, + '#packagePullSecrets':: d.obj(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."'), + packagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#runtimeConfigRef':: d.obj(help='"RuntimeConfigRef references a RuntimeConfig resource that will be used\\nto configure the package runtime."'), + runtimeConfigRef: { + '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { runtimeConfigRef+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the referent."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { runtimeConfigRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name of the RuntimeConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { runtimeConfigRef+: { name: name } } }, + }, + '#withCommonLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabels(commonLabels): { spec+: { commonLabels: commonLabels } }, + '#withCommonLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. 
May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabelsMixin(commonLabels): { spec+: { commonLabels+: commonLabels } }, + '#withIgnoreCrossplaneConstraints':: d.fn(help='"IgnoreCrossplaneConstraints indicates to the package manager whether to\\nhonor Crossplane version constrains specified by the package.\\nDefault is false."', args=[d.arg(name='ignoreCrossplaneConstraints', type=d.T.boolean)]), + withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints): { spec+: { ignoreCrossplaneConstraints: ignoreCrossplaneConstraints } }, + '#withPackage':: d.fn(help='"Package is the name of the package that is being requested."', args=[d.arg(name='package', type=d.T.string)]), + withPackage(package): { spec+: { package: package } }, + '#withPackagePullPolicy':: d.fn(help='"PackagePullPolicy defines the pull policy for the package.\\nDefault is IfNotPresent."', args=[d.arg(name='packagePullPolicy', type=d.T.string)]), + withPackagePullPolicy(packagePullPolicy): { spec+: { packagePullPolicy: packagePullPolicy } }, + '#withPackagePullSecrets':: d.fn(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."', args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecrets(packagePullSecrets): { spec+: { packagePullSecrets: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withPackagePullSecretsMixin':: d.fn(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecretsMixin(packagePullSecrets): { spec+: { packagePullSecrets+: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withRevisionActivationPolicy':: d.fn(help='"RevisionActivationPolicy specifies how the package controller should\\nupdate from one revision to the next. Options are Automatic or Manual.\\nDefault is Automatic."', args=[d.arg(name='revisionActivationPolicy', type=d.T.string)]), + withRevisionActivationPolicy(revisionActivationPolicy): { spec+: { revisionActivationPolicy: revisionActivationPolicy } }, + '#withRevisionHistoryLimit':: d.fn(help='"RevisionHistoryLimit dictates how the package controller cleans up old\\ninactive package revisions.\\nDefaults to 1. Can be disabled by explicitly setting to 0."', args=[d.arg(name='revisionHistoryLimit', type=d.T.integer)]), + withRevisionHistoryLimit(revisionHistoryLimit): { spec+: { revisionHistoryLimit: revisionHistoryLimit } }, + '#withSkipDependencyResolution':: d.fn(help='"SkipDependencyResolution indicates to the package manager whether to skip\\nresolving dependencies for a package. 
Setting this value to true may have\\nunintended consequences.\\nDefault is false."', args=[d.arg(name='skipDependencyResolution', type=d.T.boolean)]), + withSkipDependencyResolution(skipDependencyResolution): { spec+: { skipDependencyResolution: skipDependencyResolution } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1/providerRevision.libsonnet b/crossplane/1.17/_gen/pkg/v1/providerRevision.libsonnet new file mode 100644 index 0000000..52896e5 --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1/providerRevision.libsonnet @@ -0,0 +1,100 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='providerRevision', url='', help="\"A ProviderRevision represents a revision of a Provider. Crossplane\\ncreates new revisions when there are changes to a Provider.\\n\\n\\nCrossplane creates and manages ProviderRevisions. Don't directly edit\\nProviderRevisions.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ProviderRevision', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1', + kind: 'ProviderRevision', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"ProviderRevisionSpec specifies configuration for a ProviderRevision."'), + spec: { + '#controllerConfigRef':: d.obj(help='"ControllerConfigRef references a ControllerConfig resource that will be\\nused to configure the packaged controller Deployment.\\nDeprecated: Use RuntimeConfigReference instead."'), + controllerConfigRef: { + '#withName':: d.fn(help='"Name of the ControllerConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { controllerConfigRef+: { name: name } } }, + }, + '#packagePullSecrets':: d.obj(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. They are also applied to\\nany images pulled for the package, such as a provider's controller image.\""), + packagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#runtimeConfigRef':: d.obj(help='"RuntimeConfigRef references a RuntimeConfig resource that will be used\\nto configure the package runtime."'), + runtimeConfigRef: { + '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { runtimeConfigRef+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the referent."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { runtimeConfigRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name of the RuntimeConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { runtimeConfigRef+: { name: name } } }, + }, + '#withCommonLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabels(commonLabels): { spec+: { commonLabels: commonLabels } }, + '#withCommonLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabelsMixin(commonLabels): { spec+: { commonLabels+: commonLabels } }, + '#withDesiredState':: d.fn(help='"DesiredState of the PackageRevision. Can be either Active or Inactive."', args=[d.arg(name='desiredState', type=d.T.string)]), + withDesiredState(desiredState): { spec+: { desiredState: desiredState } }, + '#withIgnoreCrossplaneConstraints':: d.fn(help='"IgnoreCrossplaneConstraints indicates to the package manager whether to\\nhonor Crossplane version constrains specified by the package.\\nDefault is false."', args=[d.arg(name='ignoreCrossplaneConstraints', type=d.T.boolean)]), + withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints): { spec+: { ignoreCrossplaneConstraints: ignoreCrossplaneConstraints } }, + '#withImage':: d.fn(help='"Package image used by install Pod to extract package contents."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { spec+: { image: image } }, + '#withPackagePullPolicy':: d.fn(help="\"PackagePullPolicy defines the pull policy for the package. It is also\\napplied to any images pulled for the package, such as a provider's\\ncontroller image.\\nDefault is IfNotPresent.\"", args=[d.arg(name='packagePullPolicy', type=d.T.string)]), + withPackagePullPolicy(packagePullPolicy): { spec+: { packagePullPolicy: packagePullPolicy } }, + '#withPackagePullSecrets':: d.fn(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. 
They are also applied to\\nany images pulled for the package, such as a provider's controller image.\"", args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecrets(packagePullSecrets): { spec+: { packagePullSecrets: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withPackagePullSecretsMixin':: d.fn(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. They are also applied to\\nany images pulled for the package, such as a provider's controller image.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecretsMixin(packagePullSecrets): { spec+: { packagePullSecrets+: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withRevision':: d.fn(help="\"Revision number. Indicates when the revision will be garbage collected\\nbased on the parent's RevisionHistoryLimit.\"", args=[d.arg(name='revision', type=d.T.integer)]), + withRevision(revision): { spec+: { revision: revision } }, + '#withSkipDependencyResolution':: d.fn(help='"SkipDependencyResolution indicates to the package manager whether to skip\\nresolving dependencies for a package. Setting this value to true may have\\nunintended consequences.\\nDefault is false."', args=[d.arg(name='skipDependencyResolution', type=d.T.boolean)]), + withSkipDependencyResolution(skipDependencyResolution): { spec+: { skipDependencyResolution: skipDependencyResolution } }, + '#withTlsClientSecretName':: d.fn(help='"TLSClientSecretName is the name of the TLS Secret that stores client\\ncertificates of the Provider."', args=[d.arg(name='tlsClientSecretName', type=d.T.string)]), + withTlsClientSecretName(tlsClientSecretName): { spec+: { tlsClientSecretName: tlsClientSecretName } }, + '#withTlsServerSecretName':: d.fn(help='"TLSServerSecretName is the name of the TLS Secret that stores server\\ncertificates of the Provider."', args=[d.arg(name='tlsServerSecretName', type=d.T.string)]), + withTlsServerSecretName(tlsServerSecretName): { spec+: { tlsServerSecretName: tlsServerSecretName } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1alpha1/controllerConfig.libsonnet b/crossplane/1.17/_gen/pkg/v1alpha1/controllerConfig.libsonnet new file mode 100644 index 0000000..0466ae7 --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1alpha1/controllerConfig.libsonnet @@ -0,0 +1,1365 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='controllerConfig', url='', help='"A ControllerConfig applies settings to controllers like Provider pods.\\nDeprecated: Use the\\n[DeploymentRuntimeConfig](https://docs.crossplane.io/latest/concepts/providers#runtime-configuration)\\ninstead.\\n\\n\\nRead the\\n[Package Runtime Configuration](https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md)\\ndesign document for more details."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. 
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ControllerConfig', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1alpha1', + kind: 'ControllerConfig', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"ControllerConfigSpec specifies the configuration for a packaged controller.\\nValues provided will override package manager defaults. Labels and\\nannotations are passed to both the controller Deployment and ServiceAccount."'), + spec: { + '#affinity':: d.obj(help="\"If specified, the pod's scheduling constraints\""), + affinity: { + '#nodeAffinity':: d.obj(help='"Describes node affinity scheduling rules for the pod."'), + nodeAffinity: { + '#preferredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node matches the corresponding matchExpressions; the\\nnode(s) with the highest sum are the most preferred."'), + preferredDuringSchedulingIgnoredDuringExecution: { + '#preference':: d.obj(help='"A node selector term, associated with the corresponding weight."'), + preference: { + '#matchExpressions':: d.obj(help="\"A list of node selector requirements by node's labels.\""), + matchExpressions: { + '#withKey':: d.fn(help='"The label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. 
If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#matchFields':: d.obj(help="\"A list of node selector requirements by node's fields.\""), + matchFields: { + '#withKey':: d.fn(help='"The label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help="\"A list of node selector requirements by node's labels.\"", args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { preference+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help="\"A list of node selector requirements by node's labels.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { preference+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchFields':: d.fn(help="\"A list of node selector requirements by node's fields.\"", args=[d.arg(name='matchFields', type=d.T.array)]), + withMatchFields(matchFields): { preference+: { matchFields: if std.isArray(v=matchFields) then matchFields else [matchFields] } }, + '#withMatchFieldsMixin':: d.fn(help="\"A list of node selector requirements by node's fields.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchFields', type=d.T.array)]), + withMatchFieldsMixin(matchFields): { preference+: { matchFields+: if std.isArray(v=matchFields) then matchFields else [matchFields] } }, + }, + '#withWeight':: d.fn(help='"Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100."', args=[d.arg(name='weight', type=d.T.integer)]), + withWeight(weight): { 
weight: weight }, + }, + '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"If the affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to an update), the system\\nmay or may not try to eventually evict the pod from its node."'), + requiredDuringSchedulingIgnoredDuringExecution: { + '#nodeSelectorTerms':: d.obj(help='"Required. A list of node selector terms. The terms are ORed."'), + nodeSelectorTerms: { + '#matchExpressions':: d.obj(help="\"A list of node selector requirements by node's labels.\""), + matchExpressions: { + '#withKey':: d.fn(help='"The label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#matchFields':: d.obj(help="\"A list of node selector requirements by node's fields.\""), + matchFields: { + '#withKey':: d.fn(help='"The label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. 
If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help="\"A list of node selector requirements by node's labels.\"", args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] }, + '#withMatchExpressionsMixin':: d.fn(help="\"A list of node selector requirements by node's labels.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] }, + '#withMatchFields':: d.fn(help="\"A list of node selector requirements by node's fields.\"", args=[d.arg(name='matchFields', type=d.T.array)]), + withMatchFields(matchFields): { matchFields: if std.isArray(v=matchFields) then matchFields else [matchFields] }, + '#withMatchFieldsMixin':: d.fn(help="\"A list of node selector requirements by node's fields.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchFields', type=d.T.array)]), + withMatchFieldsMixin(matchFields): { matchFields+: if std.isArray(v=matchFields) then matchFields else [matchFields] }, + }, + '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTerms(nodeSelectorTerms): { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, + '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, + }, + '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. 
The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node matches the corresponding matchExpressions; the\\nnode(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } }, + '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node matches the corresponding matchExpressions; the\\nnode(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } }, + }, + '#podAffinity':: d.obj(help='"Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s))."'), + podAffinity: { + '#preferredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."'), + preferredDuringSchedulingIgnoredDuringExecution: { + '#podAffinityTerm':: d.obj(help='"Required. A pod affinity term, associated with the corresponding weight."'), + podAffinityTerm: { + '#labelSelector':: d.obj(help="\"A label query over a set of resources, in this case pods.\\nIf it's null, this PodAffinityTerm matches with no Pods.\""), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podAffinityTerm+: { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podAffinityTerm+: { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podAffinityTerm+: { labelSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { labelSelector+: { matchLabels+: matchLabels } } }, + }, + '#namespaceSelector':: d.obj(help="\"A label query over the set of namespaces that the term applies to.\\nThe term is applied to the union of the namespaces selected by this field\\nand the ones listed in the namespaces field.\\nnull selector and null or empty namespaces list means \\\"this pod's namespace\\\".\\nAn empty selector ({}) matches all namespaces.\""), + namespaceSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podAffinityTerm+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podAffinityTerm+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. 
The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { podAffinityTerm+: { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { podAffinityTerm+: { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, + '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\\nwhose value of the label with key topologyKey matches that of any node on which any of the\\nselected pods is running.\\nEmpty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { podAffinityTerm+: { topologyKey: topologyKey } }, + }, + '#withWeight':: d.fn(help='"weight associated with matching the corresponding podAffinityTerm,\\nin the range 1-100."', args=[d.arg(name='weight', type=d.T.integer)]), + withWeight(weight): { weight: weight }, + }, + '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"If the affinity requirements specified by this field are 
not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. all terms must be satisfied."'), + requiredDuringSchedulingIgnoredDuringExecution: { + '#labelSelector':: d.obj(help="\"A label query over a set of resources, in this case pods.\\nIf it's null, this PodAffinityTerm matches with no Pods.\""), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, + }, + '#namespaceSelector':: d.obj(help="\"A label query over the set of namespaces that the term applies to.\\nThe term is applied to the union of the namespaces selected by this field\\nand the ones listed in the namespaces field.\\nnull selector and null or empty namespaces list means \\\"this pod's namespace\\\".\\nAn empty selector ({}) matches all namespaces.\""), + namespaceSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. 
The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\\nwhose value of the label with key topologyKey matches that of any node on which any of the\\nselected pods is running.\\nEmpty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { topologyKey: topologyKey }, + }, + '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. 
The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } }, + '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } }, + '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } }, + '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. 
due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } }, + }, + '#podAntiAffinity':: d.obj(help='"Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s))."'), + podAntiAffinity: { + '#preferredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe anti-affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."'), + preferredDuringSchedulingIgnoredDuringExecution: { + '#podAffinityTerm':: d.obj(help='"Required. A pod affinity term, associated with the corresponding weight."'), + podAffinityTerm: { + '#labelSelector':: d.obj(help="\"A label query over a set of resources, in this case pods.\\nIf it's null, this PodAffinityTerm matches with no Pods.\""), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. 
This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podAffinityTerm+: { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podAffinityTerm+: { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podAffinityTerm+: { labelSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { labelSelector+: { matchLabels+: matchLabels } } }, + }, + '#namespaceSelector':: d.obj(help="\"A label query over the set of namespaces that the term applies to.\\nThe term is applied to the union of the namespaces selected by this field\\nand the ones listed in the namespaces field.\\nnull selector and null or empty namespaces list means \\\"this pod's namespace\\\".\\nAn empty selector ({}) matches all namespaces.\""), + namespaceSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. 
If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podAffinityTerm+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podAffinityTerm+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. 
The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. 
The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { podAffinityTerm+: { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { podAffinityTerm+: { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, + '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\\nwhose value of the label with key topologyKey matches that of any node on which any of the\\nselected pods is running.\\nEmpty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { podAffinityTerm+: { topologyKey: topologyKey } }, + }, + '#withWeight':: d.fn(help='"weight associated with matching the corresponding podAffinityTerm,\\nin the range 1-100."', args=[d.arg(name='weight', type=d.T.integer)]), + withWeight(weight): { weight: weight }, + }, + '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"If the anti-affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the anti-affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. all terms must be satisfied."'), + requiredDuringSchedulingIgnoredDuringExecution: { + '#labelSelector':: d.obj(help="\"A label query over a set of resources, in this case pods.\\nIf it's null, this PodAffinityTerm matches with no Pods.\""), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, + }, + '#namespaceSelector':: d.obj(help="\"A label query over the set of namespaces that the term applies to.\\nThe term is applied to the union of the namespaces selected by this field\\nand the ones listed in the namespaces field.\\nnull selector and null or empty namespaces list means \\\"this pod's namespace\\\".\\nAn empty selector ({}) matches all namespaces.\""), + namespaceSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. 
The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\\nwhose value of the label with key topologyKey matches that of any node on which any of the\\nselected pods is running.\\nEmpty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { topologyKey: topologyKey }, + }, + '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe anti-affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. 
The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } }, + '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe anti-affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } }, + '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the anti-affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. 
all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } }, + '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the anti-affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } }, + }, + }, + '#env':: d.obj(help='"List of environment variables to set in the container.\\nCannot be updated."'), + env: { + '#valueFrom':: d.obj(help="\"Source for the environment variable's value. Cannot be used if value is not empty.\""), + valueFrom: { + '#configMapKeyRef':: d.obj(help='"Selects a key of a ConfigMap."'), + configMapKeyRef: { + '#withKey':: d.fn(help='"The key to select."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { valueFrom+: { configMapKeyRef+: { key: key } } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { valueFrom+: { configMapKeyRef+: { name: name } } }, + '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { valueFrom+: { configMapKeyRef+: { optional: optional } } }, + }, + '#fieldRef':: d.obj(help="\"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`,\\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.\""), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { valueFrom+: { fieldRef+: { apiVersion: apiVersion } } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { valueFrom+: { fieldRef+: { fieldPath: fieldPath } } }, + }, + '#resourceFieldRef':: d.obj(help='"Selects a resource of the container: only resources limits and requests\\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported."'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { valueFrom+: { resourceFieldRef+: { containerName: containerName } } }, + '#withDivisor':: d.fn(help='"Specifies the output format of the exposed resources, defaults to \\"1\\', args=[d.arg(name='divisor', type=d.T.any)]), + withDivisor(divisor): { valueFrom+: { resourceFieldRef+: { divisor: divisor } } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { valueFrom+: { resourceFieldRef+: { resource: resource } } }, + }, + '#secretKeyRef':: d.obj(help="\"Selects a key of a secret in the pod's namespace\""), + secretKeyRef: { + '#withKey':: d.fn(help='"The key of the secret to select from. Must be a valid secret key."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { valueFrom+: { secretKeyRef+: { key: key } } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { valueFrom+: { secretKeyRef+: { name: name } } }, + '#withOptional':: d.fn(help='"Specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { valueFrom+: { secretKeyRef+: { optional: optional } } }, + }, + }, + '#withName':: d.fn(help='"Name of the environment variable. Must be a C_IDENTIFIER."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"Variable references $(VAR_NAME) are expanded\\nusing the previously defined environment variables in the container and\\nany service environment variables. If a variable cannot be resolved,\\nthe reference in the input string will be unchanged. 
Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\\n\\"$$(VAR_NAME)\\" will produce the string literal \\"$(VAR_NAME)\\".\\nEscaped references will never be expanded, regardless of whether the variable\\nexists or not.\\nDefaults to \\"\\"."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#envFrom':: d.obj(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."'), + envFrom: { + '#configMapRef':: d.obj(help='"The ConfigMap to select from"'), + configMapRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMapRef+: { name: name } }, + '#withOptional':: d.fn(help='"Specify whether the ConfigMap must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMapRef+: { optional: optional } }, + }, + '#secretRef':: d.obj(help='"The Secret to select from"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withOptional':: d.fn(help='"Specify whether the Secret must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secretRef+: { optional: optional } }, + }, + '#withPrefix':: d.fn(help='"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER."', args=[d.arg(name='prefix', type=d.T.string)]), + withPrefix(prefix): { prefix: prefix }, + }, + '#imagePullSecrets':: d.obj(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\\nIf specified, these secrets will be passed to individual puller implementations for them to use. For example,\\nin the case of docker, only DockerConfig type secrets are honored.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\\nSetting ImagePullSecrets will replace any secrets that have been\\npropagated to a controller Deployment, typically via packagePullSecrets."'), + imagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#metadata':: d.obj(help='"Metadata that will be added to the provider Pod."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be\\nset by external tools to store and retrieve arbitrary metadata. 
They are not\\nqueryable and should be preserved when modifying objects.\\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { spec+: { metadata+: { annotations: annotations } } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be\\nset by external tools to store and retrieve arbitrary metadata. They are not\\nqueryable and should be preserved when modifying objects.\\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { spec+: { metadata+: { annotations+: annotations } } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and\\ncategorize (scope and select) objects. This will only affect\\nlabels on the pod, not the pod selector. Labels will be merged\\nwith internal labels used by crossplane, and labels with a\\ncrossplane.io key might be overwritten.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { spec+: { metadata+: { labels: labels } } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and\\ncategorize (scope and select) objects. This will only affect\\nlabels on the pod, not the pod selector. Labels will be merged\\nwith internal labels used by crossplane, and labels with a\\ncrossplane.io key might be overwritten.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { spec+: { metadata+: { labels+: labels } } }, + }, + '#podSecurityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings.\\nOptional: Defaults to empty. See type description for default values of each field."'), + podSecurityContext: { + '#appArmorProfile':: d.obj(help='"appArmorProfile is the AppArmor options to use by the containers in this pod.\\nNote that this field cannot be set when spec.os.name is windows."'), + appArmorProfile: { + '#withLocalhostProfile':: d.fn(help='"localhostProfile indicates a profile loaded on the node that should be used.\\nThe profile must be preconfigured on the node to work.\\nMust match the loaded name of the profile.\\nMust be set if and only if type is \\"Localhost\\"."', args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { spec+: { podSecurityContext+: { appArmorProfile+: { localhostProfile: localhostProfile } } } }, + '#withType':: d.fn(help="\"type indicates which kind of AppArmor profile will be applied.\\nValid options are:\\n Localhost - a profile pre-loaded on the node.\\n RuntimeDefault - the container runtime's default profile.\\n Unconfined - no AppArmor enforcement.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { podSecurityContext+: { appArmorProfile+: { type: type } } } }, + }, + '#seLinuxOptions':: d.obj(help='"The SELinux context to be applied to all containers.\\nIf unspecified, the container runtime will allocate a random SELinux context for each\\ncontainer. 
May also be set in SecurityContext. If set in\\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\\ntakes precedence for that container.\\nNote that this field cannot be set when spec.os.name is windows."'), + seLinuxOptions: { + '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), + withLevel(level): { spec+: { podSecurityContext+: { seLinuxOptions+: { level: level } } } }, + '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), + withRole(role): { spec+: { podSecurityContext+: { seLinuxOptions+: { role: role } } } }, + '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { podSecurityContext+: { seLinuxOptions+: { type: type } } } }, + '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { spec+: { podSecurityContext+: { seLinuxOptions+: { user: user } } } }, + }, + '#seccompProfile':: d.obj(help='"The seccomp options to use by the containers in this pod.\\nNote that this field cannot be set when spec.os.name is windows."'), + seccompProfile: { + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used.\\nThe profile must be preconfigured on the node to work.\\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\\nMust be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { spec+: { podSecurityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } }, + '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied.\\nValid options are:\\n\\n\\nLocalhost - a profile defined in a file on the node should be used.\\nRuntimeDefault - the container runtime default profile should be used.\\nUnconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { podSecurityContext+: { seccompProfile+: { type: type } } } }, + }, + '#sysctls':: d.obj(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported\\nsysctls (by the container runtime) might fail to launch.\\nNote that this field cannot be set when spec.os.name is windows."'), + sysctls: { + '#withName':: d.fn(help='"Name of a property to set"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"Value of a property to set"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#windowsOptions':: d.obj(help="\"The Windows specific settings applied to all containers.\\nIf unspecified, the options within a container's SecurityContext will be used.\\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is linux.\""), + windowsOptions: { + '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook\\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\\nGMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), + withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { podSecurityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } }, + '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), + withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { podSecurityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container.\\nAll of a Pod's containers must have the same effective HostProcess value\\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\\nIn addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { podSecurityContext+: { windowsOptions+: { hostProcess: hostProcess } } } }, + '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process.\\nDefaults to the user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), + withRunAsUserName(runAsUserName): { spec+: { podSecurityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } }, + }, + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod.\\nSome volume types allow the Kubelet to change the ownership of that volume\\nto be owned by the pod:\\n\\n\\n1. The owning GID will be the FSGroup\\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\\n3. The permission bits are OR'd with rw-rw----\\n\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\\nNote that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + withFsGroup(fsGroup): { spec+: { podSecurityContext+: { fsGroup: fsGroup } } }, + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume\\nbefore being exposed inside Pod. 
This field will only apply to\\nvolume types which support fsGroup based ownership(and permissions).\\nIt will have no effect on ephemeral volume types such as: secret, configmaps\\nand emptydir.\\nValid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { podSecurityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } }, + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in SecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence\\nfor that container.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + withRunAsGroup(runAsGroup): { spec+: { podSecurityContext+: { runAsGroup: runAsGroup } } }, + '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user.\\nIf true, the Kubelet will validate the image at runtime to ensure that it\\ndoes not run as UID 0 (root) and fail to start the container if it does.\\nIf unset or false, no such validation will be performed.\\nMay also be set in SecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), + withRunAsNonRoot(runAsNonRoot): { spec+: { podSecurityContext+: { runAsNonRoot: runAsNonRoot } } }, + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in SecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence\\nfor that container.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + withRunAsUser(runAsUser): { spec+: { podSecurityContext+: { runAsUser: runAsUser } } }, + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition\\nto the container's primary GID, the fsGroup (if specified), and group memberships\\ndefined in the container image for the uid of the container process. If unspecified,\\nno additional groups are added to any container. Note that group memberships\\ndefined in the container image for the uid of the container process are still effective,\\neven if they are not included in this list.\\nNote that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + withSupplementalGroups(supplementalGroups): { spec+: { podSecurityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition\\nto the container's primary GID, the fsGroup (if specified), and group memberships\\ndefined in the container image for the uid of the container process. If unspecified,\\nno additional groups are added to any container. 
Note that group memberships\\ndefined in the container image for the uid of the container process are still effective,\\neven if they are not included in this list.\\nNote that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + withSupplementalGroupsMixin(supplementalGroups): { spec+: { podSecurityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\\nsysctls (by the container runtime) might fail to launch.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), + withSysctls(sysctls): { spec+: { podSecurityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\\nsysctls (by the container runtime) might fail to launch.\\nNote that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + withSysctlsMixin(sysctls): { spec+: { podSecurityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, + }, + '#ports':: d.obj(help='"List of container ports to expose on the container"'), + ports: { + '#withContainerPort':: d.fn(help="\"Number of port to expose on the pod's IP address.\\nThis must be a valid port number, 0 \u003c x \u003c 65536.\"", args=[d.arg(name='containerPort', type=d.T.integer)]), + withContainerPort(containerPort): { containerPort: containerPort }, + '#withHostIP':: d.fn(help='"What host IP to bind the external port to."', args=[d.arg(name='hostIP', type=d.T.string)]), + withHostIP(hostIP): { hostIP: hostIP }, + '#withHostPort':: d.fn(help='"Number of port to expose on the host.\\nIf specified, this must be a valid port number, 0 < x < 65536.\\nIf HostNetwork is specified, this must match ContainerPort.\\nMost containers do not need this."', args=[d.arg(name='hostPort', type=d.T.integer)]), + withHostPort(hostPort): { hostPort: hostPort }, + '#withName':: d.fn(help='"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\\nnamed port in a pod must have a unique name. Name for the port that can be\\nreferred to by services."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withProtocol':: d.fn(help='"Protocol for port. Must be UDP, TCP, or SCTP.\\nDefaults to \\"TCP\\"."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + }, + '#resources':: d.obj(help='"Compute Resources required by this container.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/"'), + resources: { + '#claims':: d.obj(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. It can only be set for containers."'), + claims: { + '#withName':: d.fn(help='"Name must match the name of one entry in pod.spec.resourceClaims of\\nthe Pod where this field is used. 
It makes that resource available\\ninside a container."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { spec+: { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { spec+: { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } } }, + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { spec+: { resources+: { limits: limits } } }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { spec+: { resources+: { limits+: limits } } }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { spec+: { resources+: { requests: requests } } }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { spec+: { resources+: { requests+: requests } } }, + }, + '#securityContext':: d.obj(help='"SecurityContext holds container-level security attributes and common container settings.\\nOptional: Defaults to empty. See type description for default values of each field."'), + securityContext: { + '#appArmorProfile':: d.obj(help="\"appArmorProfile is the AppArmor options to use by this container. 
If set, this profile\\noverrides the pod's appArmorProfile.\\nNote that this field cannot be set when spec.os.name is windows.\""), + appArmorProfile: { + '#withLocalhostProfile':: d.fn(help='"localhostProfile indicates a profile loaded on the node that should be used.\\nThe profile must be preconfigured on the node to work.\\nMust match the loaded name of the profile.\\nMust be set if and only if type is \\"Localhost\\"."', args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { spec+: { securityContext+: { appArmorProfile+: { localhostProfile: localhostProfile } } } }, + '#withType':: d.fn(help="\"type indicates which kind of AppArmor profile will be applied.\\nValid options are:\\n Localhost - a profile pre-loaded on the node.\\n RuntimeDefault - the container runtime's default profile.\\n Unconfined - no AppArmor enforcement.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { securityContext+: { appArmorProfile+: { type: type } } } }, + }, + '#capabilities':: d.obj(help='"The capabilities to add/drop when running containers.\\nDefaults to the default set of capabilities granted by the container runtime.\\nNote that this field cannot be set when spec.os.name is windows."'), + capabilities: { + '#withAdd':: d.fn(help='"Added capabilities"', args=[d.arg(name='add', type=d.T.array)]), + withAdd(add): { spec+: { securityContext+: { capabilities+: { add: if std.isArray(v=add) then add else [add] } } } }, + '#withAddMixin':: d.fn(help='"Added capabilities"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='add', type=d.T.array)]), + withAddMixin(add): { spec+: { securityContext+: { capabilities+: { add+: if std.isArray(v=add) then add else [add] } } } }, + '#withDrop':: d.fn(help='"Removed capabilities"', args=[d.arg(name='drop', type=d.T.array)]), + withDrop(drop): { spec+: { securityContext+: { capabilities+: { drop: if std.isArray(v=drop) then drop else [drop] } } } }, + '#withDropMixin':: d.fn(help='"Removed capabilities"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='drop', type=d.T.array)]), + withDropMixin(drop): { spec+: { securityContext+: { capabilities+: { drop+: if std.isArray(v=drop) then drop else [drop] } } } }, + }, + '#seLinuxOptions':: d.obj(help='"The SELinux context to be applied to the container.\\nIf unspecified, the container runtime will allocate a random SELinux context for each\\ncontainer. May also be set in PodSecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."'), + seLinuxOptions: { + '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), + withLevel(level): { spec+: { securityContext+: { seLinuxOptions+: { level: level } } } }, + '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), + withRole(role): { spec+: { securityContext+: { seLinuxOptions+: { role: role } } } }, + '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { securityContext+: { seLinuxOptions+: { type: type } } } }, + '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { spec+: { securityContext+: { seLinuxOptions+: { user: user } } } }, + }, + '#seccompProfile':: d.obj(help='"The seccomp options to use by this container. If seccomp options are\\nprovided at both the pod & container level, the container options\\noverride the pod options.\\nNote that this field cannot be set when spec.os.name is windows."'), + seccompProfile: { + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used.\\nThe profile must be preconfigured on the node to work.\\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\\nMust be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } }, + '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied.\\nValid options are:\\n\\n\\nLocalhost - a profile defined in a file on the node should be used.\\nRuntimeDefault - the container runtime default profile should be used.\\nUnconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { securityContext+: { seccompProfile+: { type: type } } } }, + }, + '#windowsOptions':: d.obj(help='"The Windows specific settings applied to all containers.\\nIf unspecified, the options from the PodSecurityContext will be used.\\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is linux."'), + windowsOptions: { + '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook\\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\\nGMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), + withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } }, + '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), + withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } 
}, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container.\\nAll of a Pod's containers must have the same effective HostProcess value\\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\\nIn addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } }, + '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process.\\nDefaults to the user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), + withRunAsUserName(runAsUserName): { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } }, + }, + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more\\nprivileges than its parent process. This bool directly controls if\\nthe no_new_privs flag will be set on the container process.\\nAllowPrivilegeEscalation is true always when the container is:\\n1) run as Privileged\\n2) has CAP_SYS_ADMIN\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + withAllowPrivilegeEscalation(allowPrivilegeEscalation): { spec+: { securityContext+: { allowPrivilegeEscalation: allowPrivilegeEscalation } } }, + '#withPrivileged':: d.fn(help='"Run container in privileged mode.\\nProcesses in privileged containers are essentially equivalent to root on the host.\\nDefaults to false.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), + withPrivileged(privileged): { spec+: { securityContext+: { privileged: privileged } } }, + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers.\\nThe default is DefaultProcMount which uses the container runtime defaults for\\nreadonly paths and masked paths.\\nThis requires the ProcMountType feature flag to be enabled.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), + withProcMount(procMount): { spec+: { securityContext+: { procMount: procMount } } }, + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem.\\nDefault is false.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + withReadOnlyRootFilesystem(readOnlyRootFilesystem): { spec+: { securityContext+: { readOnlyRootFilesystem: readOnlyRootFilesystem } } }, + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + withRunAsGroup(runAsGroup): { spec+: { securityContext+: { runAsGroup: runAsGroup } } }, + '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user.\\nIf true, the Kubelet will validate the image at runtime to ensure that it\\ndoes not run as UID 0 (root) and fail to start the container if it does.\\nIf unset or false, no such validation will be performed.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), + withRunAsNonRoot(runAsNonRoot): { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } }, + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + withRunAsUser(runAsUser): { spec+: { securityContext+: { runAsUser: runAsUser } } }, + }, + '#tolerations':: d.obj(help="\"If specified, the pod's tolerations.\""), + tolerations: { + '#withEffect':: d.fn(help='"Effect indicates the taint effect to match. Empty means match all taint effects.\\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute."', args=[d.arg(name='effect', type=d.T.string)]), + withEffect(effect): { effect: effect }, + '#withKey':: d.fn(help='"Key is the taint key that the toleration applies to. Empty means match all taint keys.\\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Operator represents a key's relationship to the value.\\nValid operators are Exists and Equal. Defaults to Equal.\\nExists is equivalent to wildcard for value, so that a pod can\\ntolerate all taints of a particular category.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withTolerationSeconds':: d.fn(help='"TolerationSeconds represents the period of time the toleration (which must be\\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\\nit is not set, which means tolerate the taint forever (do not evict). Zero and\\nnegative values will be treated as 0 (evict immediately) by the system."', args=[d.arg(name='tolerationSeconds', type=d.T.integer)]), + withTolerationSeconds(tolerationSeconds): { tolerationSeconds: tolerationSeconds }, + '#withValue':: d.fn(help='"Value is the taint value the toleration matches to.\\nIf the operator is Exists, the value should be empty, otherwise just a regular string."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#volumeMounts':: d.obj(help="\"List of VolumeMounts to mount into the container's filesystem.\\nCannot be updated.\""), + volumeMounts: { + '#withMountPath':: d.fn(help="\"Path within the container at which the volume should be mounted. 
Must\\nnot contain ':'.\"", args=[d.arg(name='mountPath', type=d.T.string)]), + withMountPath(mountPath): { mountPath: mountPath }, + '#withMountPropagation':: d.fn(help='"mountPropagation determines how mounts are propagated from the host\\nto container and the other way around.\\nWhen not set, MountPropagationNone is used.\\nThis field is beta in 1.10.\\nWhen RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified\\n(which defaults to None)."', args=[d.arg(name='mountPropagation', type=d.T.string)]), + withMountPropagation(mountPropagation): { mountPropagation: mountPropagation }, + '#withName':: d.fn(help='"This must match the Name of a Volume."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withReadOnly':: d.fn(help='"Mounted read-only if true, read-write otherwise (false or unspecified).\\nDefaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withRecursiveReadOnly':: d.fn(help='"RecursiveReadOnly specifies whether read-only mounts should be handled\\nrecursively.\\n\\n\\nIf ReadOnly is false, this field has no meaning and must be unspecified.\\n\\n\\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made\\nrecursively read-only. If this field is set to IfPossible, the mount is made\\nrecursively read-only, if it is supported by the container runtime. If this\\nfield is set to Enabled, the mount is made recursively read-only if it is\\nsupported by the container runtime, otherwise the pod will not be started and\\nan error will be generated to indicate the reason.\\n\\n\\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to\\nNone (or be unspecified, which defaults to None).\\n\\n\\nIf this field is not specified, it is treated as an equivalent of Disabled."', args=[d.arg(name='recursiveReadOnly', type=d.T.string)]), + withRecursiveReadOnly(recursiveReadOnly): { recursiveReadOnly: recursiveReadOnly }, + '#withSubPath':: d.fn(help="\"Path within the volume from which the container's volume should be mounted.\\nDefaults to \\\"\\\" (volume's root).\"", args=[d.arg(name='subPath', type=d.T.string)]), + withSubPath(subPath): { subPath: subPath }, + '#withSubPathExpr':: d.fn(help="\"Expanded path within the volume from which the container's volume should be mounted.\\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\\nDefaults to \\\"\\\" (volume's root).\\nSubPathExpr and SubPath are mutually exclusive.\"", args=[d.arg(name='subPathExpr', type=d.T.string)]), + withSubPathExpr(subPathExpr): { subPathExpr: subPathExpr }, + }, + '#volumes':: d.obj(help='"List of volumes that can be mounted by containers belonging to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes"'), + volumes: { + '#awsElasticBlockStore':: d.obj(help="\"awsElasticBlockStore represents an AWS Disk resource that is attached to a\\nkubelet's host machine and then exposed to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\""), + awsElasticBlockStore: { + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { awsElasticBlockStore+: { fsType: fsType } }, + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount.\\nIf omitted, the default is to mount by volume name.\\nExamples: For volume /dev/sda1, you specify the partition as \\"1\\".\\nSimilarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { awsElasticBlockStore+: { partition: partition } }, + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { awsElasticBlockStore+: { readOnly: readOnly } }, + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + withVolumeID(volumeID): { awsElasticBlockStore+: { volumeID: volumeID } }, + }, + '#azureDisk':: d.obj(help='"azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), + azureDisk: { + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + withCachingMode(cachingMode): { azureDisk+: { cachingMode: cachingMode } }, + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + withDiskName(diskName): { azureDisk+: { diskName: diskName } }, + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + withDiskURI(diskURI): { azureDisk+: { diskURI: diskURI } }, + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { azureDisk+: { fsType: fsType } }, + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { azureDisk+: { kind: kind } }, + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { azureDisk+: { readOnly: readOnly } }, + }, + '#azureFile':: d.obj(help='"azureFile represents an Azure File Service mount on the host and bind mount to the pod."'), + azureFile: { + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { azureFile+: { readOnly: readOnly } }, + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { azureFile+: { secretName: secretName } }, + '#withShareName':: d.fn(help='"shareName is the azure share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + withShareName(shareName): { azureFile+: { shareName: shareName } }, + }, + '#cephfs':: d.obj(help="\"cephFS represents a Ceph FS mount on the host that shares a pod's lifetime\""), + cephfs: { + '#secretRef':: d.obj(help='"secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { cephfs+: { secretRef+: { name: name } } }, + }, + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { cephfs+: { path: path } }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { cephfs+: { readOnly: readOnly } }, + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + withSecretFile(secretFile): { cephfs+: { secretFile: secretFile } }, + '#withUser':: d.fn(help='"user is optional: User is the rados user name, default is admin\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { cephfs+: { user: user } }, + }, + '#cinder':: d.obj(help='"cinder represents a cinder volume attached and mounted on kubelets host machine.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md"'), + cinder: { + '#secretRef':: d.obj(help='"secretRef is optional: points to a secret object containing parameters used to connect\\nto OpenStack."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { cinder+: { secretRef+: { name: name } } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { cinder+: { fsType: fsType } }, + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { cinder+: { readOnly: readOnly } }, + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + withVolumeID(volumeID): { cinder+: { volumeID: volumeID } }, + }, + '#configMap':: d.obj(help='"configMap represents a configMap that should populate this volume"'), + configMap: { + '#items':: d.obj(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\""), + items: { + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to.\\nMay not be an absolute path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withDefaultMode':: d.fn(help='"defaultMode is optional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDefaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { configMap+: { defaultMode: defaultMode } }, + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMap+: { name: name } }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMap+: { optional: optional } }, + }, + '#csi':: d.obj(help='"csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature)."'), + csi: { + '#nodePublishSecretRef':: d.obj(help='"nodePublishSecretRef is a reference to the secret object containing\\nsensitive information to pass to the CSI driver to complete the CSI\\nNodePublishVolume and NodeUnpublishVolume calls.\\nThis field is optional, and may be empty if no secret is required. If the\\nsecret object contains more than one secret, all secret references are passed."'), + nodePublishSecretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { csi+: { nodePublishSecretRef+: { name: name } } }, + }, + '#withDriver':: d.fn(help='"driver is the name of the CSI driver that handles this volume.\\nConsult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), + withDriver(driver): { csi+: { driver: driver } }, + '#withFsType':: d.fn(help='"fsType to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\".\\nIf not provided, the empty value is passed to the associated CSI driver\\nwhich will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { csi+: { fsType: fsType } }, + '#withReadOnly':: d.fn(help='"readOnly specifies a read-only configuration for the volume.\\nDefaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { csi+: { readOnly: readOnly } }, + '#withVolumeAttributes':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI\\ndriver. Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + withVolumeAttributes(volumeAttributes): { csi+: { volumeAttributes: volumeAttributes } }, + '#withVolumeAttributesMixin':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI\\ndriver. 
Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + withVolumeAttributesMixin(volumeAttributes): { csi+: { volumeAttributes+: volumeAttributes } }, + }, + '#downwardAPI':: d.obj(help='"downwardAPI represents downward API about the pod that should populate this volume"'), + downwardAPI: { + '#items':: d.obj(help='"Items is a list of downward API volume file"'), + items: { + '#fieldRef':: d.obj(help='"Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported."'), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { fieldRef+: { apiVersion: apiVersion } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldRef+: { fieldPath: fieldPath } }, + }, + '#resourceFieldRef':: d.obj(help='"Selects a resource of the container: only resources limits and requests\\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported."'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, + '#withDivisor':: d.fn(help='"Specifies the output format of the exposed resources, defaults to \\"1\\', args=[d.arg(name='divisor', type=d.T.any)]), + withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resourceFieldRef+: { resource: resource } }, + }, + '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file, must be an octal value\\nbetween 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withDefaultMode':: d.fn(help='"Optional: mode bits to use on created files by default. 
Must be a\\nOptional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDefaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { downwardAPI+: { defaultMode: defaultMode } }, + '#withItems':: d.fn(help='"Items is a list of downward API volume file"', args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { downwardAPI+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help='"Items is a list of downward API volume file"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { downwardAPI+: { items+: if std.isArray(v=items) then items else [items] } }, + }, + '#emptyDir':: d.obj(help="\"emptyDir represents a temporary directory that shares a pod's lifetime.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\""), + emptyDir: { + '#withMedium':: d.fn(help="\"medium represents what type of storage medium should back this directory.\\nThe default is \\\"\\\" which means to use the node's default medium.\\nMust be an empty string (default) or Memory.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), + withMedium(medium): { emptyDir+: { medium: medium } }, + '#withSizeLimit':: d.fn(help='"sizeLimit is the total amount of local storage required for this EmptyDir volume.\\nThe size limit is also applicable for memory medium.\\nThe maximum usage on memory medium EmptyDir would be the minimum value between\\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\\nThe default is nil which means that the limit is undefined.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir"', args=[d.arg(name='sizeLimit', type=d.T.any)]), + withSizeLimit(sizeLimit): { emptyDir+: { sizeLimit: sizeLimit } }, + }, + '#ephemeral':: d.obj(help="\"ephemeral represents a volume that is handled by a cluster storage driver.\\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\\nand deleted when the pod is removed.\\n\\n\\nUse this if:\\na) the volume is only needed while the pod runs,\\nb) features of normal volumes like restoring from snapshot or capacity\\n tracking are needed,\\nc) the storage driver is specified through a storage class, and\\nd) the storage driver supports dynamic volume provisioning through\\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\\n information on the connection between this volume type\\n and PersistentVolumeClaim).\\n\\n\\nUse PersistentVolumeClaim or one of the vendor-specific\\nAPIs for volumes that persist for longer than the lifecycle\\nof an individual pod.\\n\\n\\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\\nbe used that way - see the documentation of the driver for\\nmore information.\\n\\n\\nA pod can use both types of ephemeral volumes and\\npersistent volumes at the same time.\""), + ephemeral: { + '#volumeClaimTemplate':: d.obj(help='"Will be used to create a stand-alone PVC 
to provision the volume.\\nThe pod in which this EphemeralVolumeSource is embedded will be the\\nowner of the PVC, i.e. the PVC will be deleted together with the\\npod. The name of the PVC will be `<pod name>-<volume name>` where\\n`<volume name>` is the name from the `PodSpec.Volumes` array\\nentry. Pod validation will reject the pod if the concatenated name\\nis not valid for a PVC (for example, too long).\\n\\n\\nAn existing PVC with that name that is not owned by the pod\\nwill *not* be used for the pod to avoid using an unrelated\\nvolume by mistake. Starting the pod is then blocked until\\nthe unrelated PVC is removed. If such a pre-created PVC is\\nmeant to be used by the pod, the PVC has to updated with an\\nowner reference to the pod once the pod exists. Normally\\nthis should not be necessary, but it may be useful when\\nmanually reconstructing a broken cluster.\\n\\n\\nThis field is read-only and no changes will be made by Kubernetes\\nto the PVC after it has been created.\\n\\n\\nRequired, must not be nil."'), + volumeClaimTemplate: { + '#metadata':: d.obj(help='"May contain labels and annotations that will be copied into the PVC\\nwhen creating it. No other fields are allowed and will be rejected during\\nvalidation."'), + metadata: { + '#withAnnotations':: d.fn(help='', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { ephemeral+: { volumeClaimTemplate+: { metadata+: { annotations: annotations } } } }, + '#withAnnotationsMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { ephemeral+: { volumeClaimTemplate+: { metadata+: { annotations+: annotations } } } }, + '#withFinalizers':: d.fn(help='', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { ephemeral+: { volumeClaimTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, + '#withFinalizersMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { ephemeral+: { volumeClaimTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, + '#withLabels':: d.fn(help='', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { ephemeral+: { volumeClaimTemplate+: { metadata+: { labels: labels } } } }, + '#withLabelsMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { ephemeral+: { volumeClaimTemplate+: { metadata+: { labels+: labels } } } }, + '#withName':: d.fn(help='', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ephemeral+: { volumeClaimTemplate+: { metadata+: { name: name } } } }, + '#withNamespace':: d.fn(help='', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { ephemeral+: { volumeClaimTemplate+: { metadata+: { namespace: namespace } } } }, + }, + '#spec':: d.obj(help='"The specification for the PersistentVolumeClaim. The entire content is\\ncopied unchanged into the PVC that gets created from this\\ntemplate. 
The same fields as in a PersistentVolumeClaim\\nare also valid here."'), + spec: { + '#dataSource':: d.obj(help='"dataSource field can be used to specify either:\\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\\n* An existing PVC (PersistentVolumeClaim)\\nIf the provisioner or an external controller can support the specified data source,\\nit will create a new volume based on the contents of the specified data source.\\nWhen the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,\\nand dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.\\nIf the namespace is specified, then dataSourceRef will not be copied to dataSource."'), + dataSource: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced.\\nIf APIGroup is not specified, the specified Kind must be in the core API group.\\nFor any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSource+: { apiGroup: apiGroup } } } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSource+: { kind: kind } } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSource+: { name: name } } } } }, + }, + '#dataSourceRef':: d.obj(help="\"dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\\nvolume is desired. This may be any object from a non-empty API group (non\\ncore object) or a PersistentVolumeClaim object.\\nWhen this field is specified, volume binding will only succeed if the type of\\nthe specified object matches some installed volume populator or dynamic\\nprovisioner.\\nThis field will replace the functionality of the dataSource field and as such\\nif both fields are non-empty, they must have the same value. 
For backwards\\ncompatibility, when namespace isn't specified in dataSourceRef,\\nboth fields (dataSource and dataSourceRef) will be set to the same\\nvalue automatically if one of them is empty and the other is non-empty.\\nWhen namespace is specified in dataSourceRef,\\ndataSource isn't set to the same value and must be empty.\\nThere are three important differences between dataSource and dataSourceRef:\\n* While dataSource only allows two specific types of objects, dataSourceRef\\n allows any non-core object, as well as PersistentVolumeClaim objects.\\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\\n preserves all values, and generates an error if a disallowed value is\\n specified.\\n* While dataSource only allows local objects, dataSourceRef allows objects\\n in any namespaces.\\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\\n(Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\""), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced.\\nIf APIGroup is not specified, the specified Kind must be in the core API group.\\nFor any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { apiGroup: apiGroup } } } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { kind: kind } } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { name: name } } } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced\\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. 
See the ReferenceGrant documentation for details.\\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { namespace: namespace } } } } }, + }, + '#resources':: d.obj(help='"resources represents the minimum resources the volume should have.\\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\\nthat are lower than previous value but must still be higher than capacity recorded in the\\nstatus field of the claim.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources"'), + resources: { + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { limits: limits } } } } }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { limits+: limits } } } } }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { requests: requests } } } } }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { requests+: requests } } } } }, + }, + '#selector':: d.obj(help='"selector is a label query over volumes to consider for binding."'), + selector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. 
If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchLabels: matchLabels } } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } } }, + }, + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + withAccessModes(accessModes): { ephemeral+: { volumeClaimTemplate+: { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + withAccessModesMixin(accessModes): { ephemeral+: { volumeClaimTemplate+: { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + withStorageClassName(storageClassName): { ephemeral+: { volumeClaimTemplate+: { spec+: { storageClassName: storageClassName } } } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.\\nIf specified, the CSI driver will create or update the volume with the attributes defined\\nin the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,\\nit can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass\\nwill be applied to the claim but it's not allowed to reset this field to empty string once it is set.\\nIf unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass\\nwill be set by the persistentvolume controller if it exists.\\nIf the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be\\nset to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource\\nexists.\\nMore info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/\\n(Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeAttributesClassName: volumeAttributesClassName } } } }, + '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim.\\nValue of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), + withVolumeMode(volumeMode): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeMode: volumeMode } } } }, + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeName: volumeName } } } }, + }, + }, + }, + '#fc':: d.obj(help="\"fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.\""), + fc: { + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified.\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fc+: { fsType: fsType } }, + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { fc+: { lun: lun } }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { fc+: { readOnly: readOnly } }, + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + withTargetWWNs(targetWWNs): { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + withTargetWWNsMixin(targetWWNs): { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids)\\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + withWwids(wwids): { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } }, + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids)\\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + withWwidsMixin(wwids): { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } }, + }, + '#flexVolume':: d.obj(help='"flexVolume represents a generic volume resource that is\\nprovisioned/attached using an exec based plugin."'), + flexVolume: { + '#secretRef':: d.obj(help='"secretRef is Optional: secretRef is reference to the secret object containing\\nsensitive information to pass to the plugin scripts. This may be\\nempty if no secret object is specified. If the secret object\\ncontains more than one secret, all secrets are passed to the plugin\\nscripts."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { flexVolume+: { secretRef+: { name: name } } }, + }, + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + withDriver(driver): { flexVolume+: { driver: driver } }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { flexVolume+: { fsType: fsType } }, + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + withOptions(options): { flexVolume+: { options: options } }, + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + withOptionsMixin(options): { flexVolume+: { options+: options } }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { flexVolume+: { readOnly: readOnly } }, + }, + '#flocker':: d.obj(help="\"flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running\""), + flocker: { + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker\\nshould be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + withDatasetName(datasetName): { flocker+: { datasetName: datasetName } }, + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + withDatasetUUID(datasetUUID): { flocker+: { datasetUUID: datasetUUID } }, + }, + '#gcePersistentDisk':: d.obj(help="\"gcePersistentDisk represents a GCE Disk resource that is attached to a\\nkubelet's host machine and then exposed to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\""), + gcePersistentDisk: { + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { gcePersistentDisk+: { fsType: fsType } }, + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount.\\nIf omitted, the default is to mount by volume name.\\nExamples: For volume /dev/sda1, you specify the partition as \\"1\\".\\nSimilarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty).\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { gcePersistentDisk+: { partition: partition } }, + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + withPdName(pdName): { gcePersistentDisk+: { pdName: pdName } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { gcePersistentDisk+: { readOnly: readOnly } }, + }, + '#gitRepo':: d.obj(help="\"gitRepo represents a git repository at a particular revision.\\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\\ninto the Pod's container.\""), + gitRepo: { + '#withDirectory':: d.fn(help="\"directory is the target directory name.\\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\\ngit repository. 
Otherwise, if specified, the volume will contain the git repository in\\nthe subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), + withDirectory(directory): { gitRepo+: { directory: directory } }, + '#withRepository':: d.fn(help='"repository is the URL"', args=[d.arg(name='repository', type=d.T.string)]), + withRepository(repository): { gitRepo+: { repository: repository } }, + '#withRevision':: d.fn(help='"revision is the commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), + withRevision(revision): { gitRepo+: { revision: revision } }, + }, + '#glusterfs':: d.obj(help="\"glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md\""), + glusterfs: { + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + withEndpoints(endpoints): { glusterfs+: { endpoints: endpoints } }, + '#withPath':: d.fn(help='"path is the Glusterfs volume path.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { glusterfs+: { path: path } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\\nDefaults to false.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { glusterfs+: { readOnly: readOnly } }, + }, + '#hostPath':: d.obj(help='"hostPath represents a pre-existing file or directory on the host\\nmachine that is directly exposed to the container. This is generally\\nused for system agents or other privileged things that are allowed\\nto see the host machine. Most containers will NOT need this.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\\n---\\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\\nmount host directories as read/write."'), + hostPath: { + '#withPath':: d.fn(help='"path of the directory on the host.\\nIf the path is a symlink, it will follow the link to the real path.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { hostPath+: { path: path } }, + '#withType':: d.fn(help='"type for HostPath Volume\\nDefaults to \\"\\"\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { hostPath+: { type: type } }, + }, + '#iscsi':: d.obj(help="\"iscsi represents an ISCSI Disk resource that is attached to a\\nkubelet's host machine and then exposed to the pod.\\nMore info: https://examples.k8s.io/volumes/iscsi/README.md\""), + iscsi: { + '#secretRef':: d.obj(help='"secretRef is the CHAP Secret for iSCSI target and initiator authentication"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { iscsi+: { secretRef+: { name: name } } }, + }, + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + withChapAuthDiscovery(chapAuthDiscovery): { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } }, + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + withChapAuthSession(chapAuthSession): { iscsi+: { chapAuthSession: chapAuthSession } }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { iscsi+: { fsType: fsType } }, + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name.\\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\\n<target portal>:<volume name> will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + withInitiatorName(initiatorName): { iscsi+: { initiatorName: initiatorName } }, + '#withIqn':: d.fn(help='"iqn is the target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + withIqn(iqn): { iscsi+: { iqn: iqn } }, + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport.\\nDefaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + withIscsiInterface(iscsiInterface): { iscsi+: { iscsiInterface: iscsiInterface } }, + '#withLun':: d.fn(help='"lun represents iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { iscsi+: { lun: lun } }, + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + withPortals(portals): { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } }, + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + withPortalsMixin(portals): { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { iscsi+: { readOnly: readOnly } }, + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + withTargetPortal(targetPortal): { iscsi+: { targetPortal: targetPortal } }, + }, + '#nfs':: d.obj(help="\"nfs represents an NFS mount on the host that shares a pod's lifetime\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\""), + nfs: { + '#withPath':: d.fn(help='"path that is exported by the NFS server.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { nfs+: { path: path } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions.\\nDefaults to false.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { nfs+: { readOnly: readOnly } }, + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + withServer(server): { nfs+: { server: server } }, + }, + '#persistentVolumeClaim':: d.obj(help='"persistentVolumeClaimVolumeSource represents a reference to a\\nPersistentVolumeClaim in the same namespace.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"'), + persistentVolumeClaim: { + '#withClaimName':: d.fn(help='"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]), + withClaimName(claimName): { persistentVolumeClaim+: { claimName: claimName } }, + '#withReadOnly':: d.fn(help='"readOnly Will force the ReadOnly setting in VolumeMounts.\\nDefault false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { persistentVolumeClaim+: { readOnly: readOnly } }, + }, + '#photonPersistentDisk':: d.obj(help='"photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"'), + photonPersistentDisk: { + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { photonPersistentDisk+: { fsType: fsType } }, + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + withPdID(pdID): { photonPersistentDisk+: { pdID: pdID } }, + }, + '#portworxVolume':: d.obj(help='"portworxVolume represents a portworx volume attached and mounted on kubelets host machine"'), + portworxVolume: { + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { portworxVolume+: { fsType: fsType } }, + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { portworxVolume+: { readOnly: readOnly } }, + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + withVolumeID(volumeID): { portworxVolume+: { volumeID: volumeID } }, + }, + '#projected':: d.obj(help='"projected items for all in one resources secrets, configmaps, and downward API"'), + projected: { + '#sources':: d.obj(help='"sources is the list of volume projections"'), + sources: { + '#clusterTrustBundle':: d.obj(help='"ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field\\nof ClusterTrustBundle objects in an auto-updating file.\\n\\n\\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\\n\\n\\nClusterTrustBundle objects can either be selected by name, or by the\\ncombination of signer name and a label selector.\\n\\n\\nKubelet performs aggressive normalization of the PEM contents written\\ninto the pod filesystem. Esoteric PEM features such as inter-block\\ncomments and block headers are stripped. Certificates are deduplicated.\\nThe ordering of certificates within the file is arbitrary, and Kubelet\\nmay change the order over time."'), + clusterTrustBundle: { + '#labelSelector':: d.obj(help='"Select all ClusterTrustBundles that match this label selector. Only has\\neffect if signerName is set. Mutually-exclusive with name. If unset,\\ninterpreted as \\"match nothing\\". If set but empty, interpreted as \\"match\\neverything\\"."'), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { clusterTrustBundle+: { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { clusterTrustBundle+: { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { clusterTrustBundle+: { labelSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { clusterTrustBundle+: { labelSelector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"Select a single ClusterTrustBundle by object name. Mutually-exclusive\\nwith signerName and labelSelector."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { clusterTrustBundle+: { name: name } }, + '#withOptional':: d.fn(help="\"If true, don't block pod startup if the referenced ClusterTrustBundle(s)\\naren't available. If using name, then the named ClusterTrustBundle is\\nallowed not to exist. If using signerName, then the combination of\\nsignerName and labelSelector is allowed to match zero\\nClusterTrustBundles.\"", args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { clusterTrustBundle+: { optional: optional } }, + '#withPath':: d.fn(help='"Relative path from the volume root to write the bundle."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { clusterTrustBundle+: { path: path } }, + '#withSignerName':: d.fn(help='"Select all ClusterTrustBundles that match this signer name.\\nMutually-exclusive with name. The contents of all selected\\nClusterTrustBundles will be unified and deduplicated."', args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { clusterTrustBundle+: { signerName: signerName } }, + }, + '#configMap':: d.obj(help='"configMap information about the configMap data to project"'), + configMap: { + '#items':: d.obj(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. 
Paths must be\\nrelative and may not contain the '..' path or start with '..'.\""), + items: { + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to.\\nMay not be an absolute path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMap+: { name: name } }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMap+: { optional: optional } }, + }, + '#downwardAPI':: d.obj(help='"downwardAPI information about the downwardAPI data to project"'), + downwardAPI: { + '#items':: d.obj(help='"Items is a list of DownwardAPIVolume file"'), + items: { + '#fieldRef':: d.obj(help='"Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported."'), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { fieldRef+: { apiVersion: apiVersion } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldRef+: { fieldPath: fieldPath } }, + }, + '#resourceFieldRef':: d.obj(help='"Selects a resource of the container: only resources limits and requests\\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported."'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, + '#withDivisor':: d.fn(help='"Specifies the output format of the exposed resources, defaults to \\"1\\', args=[d.arg(name='divisor', type=d.T.any)]), + withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resourceFieldRef+: { resource: resource } }, + }, + '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file, must be an octal value\\nbetween 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withItems':: d.fn(help='"Items is a list of DownwardAPIVolume file"', args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { downwardAPI+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help='"Items is a list of DownwardAPIVolume file"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { downwardAPI+: { items+: if std.isArray(v=items) then items else [items] } }, + }, + '#secret':: d.obj(help='"secret information about the secret data to project"'), + secret: { + '#items':: d.obj(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\""), + items: { + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to.\\nMay not be an absolute path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secret+: { name: name } }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secret+: { optional: optional } }, + }, + '#serviceAccountToken':: d.obj(help='"serviceAccountToken is information about the serviceAccountToken data to project"'), + serviceAccountToken: { + '#withAudience':: d.fn(help='"audience is the intended audience of the token. A recipient of a token\\nmust identify itself with an identifier specified in the audience of the\\ntoken, and otherwise should reject the token. The audience defaults to the\\nidentifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]), + withAudience(audience): { serviceAccountToken+: { audience: audience } }, + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the service\\naccount token. As the token approaches expiration, the kubelet volume\\nplugin will proactively rotate the service account token. The kubelet will\\nstart trying to rotate the token if the token is older than 80 percent of\\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\\nand must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + withExpirationSeconds(expirationSeconds): { serviceAccountToken+: { expirationSeconds: expirationSeconds } }, + '#withPath':: d.fn(help='"path is the path relative to the mount point of the file to project the\\ntoken into."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { serviceAccountToken+: { path: path } }, + }, + }, + '#withDefaultMode':: d.fn(help='"defaultMode are the mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { projected+: { defaultMode: defaultMode } }, + '#withSources':: d.fn(help='"sources is the list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), + withSources(sources): { projected+: { sources: if std.isArray(v=sources) then sources else [sources] } }, + '#withSourcesMixin':: d.fn(help='"sources is the list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), + withSourcesMixin(sources): { projected+: { sources+: if std.isArray(v=sources) then sources else [sources] } }, + }, + '#quobyte':: d.obj(help="\"quobyte represents a Quobyte mount on the host that shares a pod's lifetime\""), + quobyte: { + '#withGroup':: d.fn(help='"group to map volume access to\\nDefault is no group"', 
args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { quobyte+: { group: group } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions.\\nDefaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { quobyte+: { readOnly: readOnly } }, + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services\\nspecified as a string as host:port pair (multiple entries are separated with commas)\\nwhich acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + withRegistry(registry): { quobyte+: { registry: registry } }, + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend\\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + withTenant(tenant): { quobyte+: { tenant: tenant } }, + '#withUser':: d.fn(help='"user to map volume access to\\nDefaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { quobyte+: { user: user } }, + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + withVolume(volume): { quobyte+: { volume: volume } }, + }, + '#rbd':: d.obj(help="\"rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md\""), + rbd: { + '#secretRef':: d.obj(help='"secretRef is name of the authentication secret for RBDUser. If provided\\noverrides keyring.\\nDefault is nil.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { rbd+: { secretRef+: { name: name } } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { rbd+: { fsType: fsType } }, + '#withImage':: d.fn(help='"image is the rados image name.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { rbd+: { image: image } }, + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser.\\nDefault is /etc/ceph/keyring.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + withKeyring(keyring): { rbd+: { keyring: keyring } }, + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, + '#withPool':: d.fn(help='"pool is the rados pool name.\\nDefault is rbd.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + withPool(pool): { rbd+: { pool: pool } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { rbd+: { readOnly: readOnly } }, + '#withUser':: d.fn(help='"user is the rados user name.\\nDefault is admin.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { rbd+: { user: user } }, + }, + '#scaleIO':: d.obj(help='"scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes."'), + scaleIO: { + '#secretRef':: d.obj(help='"secretRef references to the secret for ScaleIO user and other\\nsensitive information. If this is not provided, Login operation will fail."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { scaleIO+: { secretRef+: { name: name } } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\".\\nDefault is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { scaleIO+: { fsType: fsType } }, + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + withGateway(gateway): { scaleIO+: { gateway: gateway } }, + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + withProtectionDomain(protectionDomain): { scaleIO+: { protectionDomain: protectionDomain } }, + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { scaleIO+: { readOnly: readOnly } }, + '#withSslEnabled':: d.fn(help='"sslEnabled Flag enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + withSslEnabled(sslEnabled): { scaleIO+: { sslEnabled: sslEnabled } }, + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\\nDefault is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + withStorageMode(storageMode): { scaleIO+: { storageMode: storageMode } }, + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + withStoragePool(storagePool): { scaleIO+: { storagePool: storagePool } }, + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + withSystem(system): { scaleIO+: { system: system } }, + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system\\nthat is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { scaleIO+: { volumeName: volumeName } }, + }, + '#secret':: d.obj(help='"secret represents a secret that should populate this volume.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret"'), + secret: { + '#items':: d.obj(help="\"items If unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\""), + items: { + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to.\\nMay not be an absolute path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withDefaultMode':: d.fn(help='"defaultMode is Optional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values\\nfor mode bits. Defaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { secret+: { defaultMode: defaultMode } }, + '#withItems':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secret+: { optional: optional } }, + '#withSecretName':: d.fn(help="\"secretName is the name of the secret in the pod's namespace to use.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secret+: { secretName: secretName } }, + }, + '#storageos':: d.obj(help='"storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."'), + storageos: { + '#secretRef':: d.obj(help='"secretRef specifies the secret to use for obtaining the StorageOS API\\ncredentials. If not specified, default values will be attempted."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { storageos+: { secretRef+: { name: name } } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { storageos+: { fsType: fsType } }, + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { storageos+: { readOnly: readOnly } }, + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume\\nnames are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { storageos+: { volumeName: volumeName } }, + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no\\nnamespace is specified then the Pod's namespace will be used. This allows the\\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\\nSet VolumeName to any name to override the default behaviour.\\nSet to \\\"default\\\" if you are not using namespaces within StorageOS.\\nNamespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + withVolumeNamespace(volumeNamespace): { storageos+: { volumeNamespace: volumeNamespace } }, + }, + '#vsphereVolume':: d.obj(help='"vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine"'), + vsphereVolume: { + '#withFsType':: d.fn(help='"fsType is filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { vsphereVolume+: { fsType: fsType } }, + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + withStoragePolicyID(storagePolicyID): { vsphereVolume+: { storagePolicyID: storagePolicyID } }, + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + withStoragePolicyName(storagePolicyName): { vsphereVolume+: { storagePolicyName: storagePolicyName } }, + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + withVolumePath(volumePath): { vsphereVolume+: { volumePath: volumePath } }, + }, + '#withName':: d.fn(help='"name of the volume.\\nMust be a DNS_LABEL and unique within the pod.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#withArgs':: d.fn(help="\"Arguments to the entrypoint.\\nThe docker image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\\nregardless of whether the variable exists or not.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), + withArgs(args): { spec+: { args: if std.isArray(v=args) then args else [args] } }, + '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint.\\nThe docker image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\\nregardless of whether the variable exists or not.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), + withArgsMixin(args): { spec+: { args+: if std.isArray(v=args) then args else [args] } }, + '#withEnv':: d.fn(help='"List of environment variables to set in the container.\\nCannot be updated."', args=[d.arg(name='env', type=d.T.array)]), + withEnv(env): { spec+: { env: if std.isArray(v=env) then env else [env] } }, + '#withEnvFrom':: d.fn(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. 
When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."', args=[d.arg(name='envFrom', type=d.T.array)]), + withEnvFrom(envFrom): { spec+: { envFrom: if std.isArray(v=envFrom) then envFrom else [envFrom] } }, + '#withEnvFromMixin':: d.fn(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='envFrom', type=d.T.array)]), + withEnvFromMixin(envFrom): { spec+: { envFrom+: if std.isArray(v=envFrom) then envFrom else [envFrom] } }, + '#withEnvMixin':: d.fn(help='"List of environment variables to set in the container.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='env', type=d.T.array)]), + withEnvMixin(env): { spec+: { env+: if std.isArray(v=env) then env else [env] } }, + '#withImage':: d.fn(help='"Docker image name.\\nMore info: https://kubernetes.io/docs/concepts/containers/images\\nThis field is optional to allow higher level config management to default or override\\ncontainer images in workload controllers like Deployments and StatefulSets."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { spec+: { image: image } }, + '#withImagePullPolicy':: d.fn(help='"Image pull policy.\\nOne of Always, Never, IfNotPresent.\\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images"', args=[d.arg(name='imagePullPolicy', type=d.T.string)]), + withImagePullPolicy(imagePullPolicy): { spec+: { imagePullPolicy: imagePullPolicy } }, + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\\nIf specified, these secrets will be passed to individual puller implementations for them to use. For example,\\nin the case of docker, only DockerConfig type secrets are honored.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\\nSetting ImagePullSecrets will replace any secrets that have been\\npropagated to a controller Deployment, typically via packagePullSecrets."', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + withImagePullSecrets(imagePullSecrets): { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\\nIf specified, these secrets will be passed to individual puller implementations for them to use. 
For example,\\nin the case of docker, only DockerConfig type secrets are honored.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\\nSetting ImagePullSecrets will replace any secrets that have been\\npropagated to a controller Deployment, typically via packagePullSecrets."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + withImagePullSecretsMixin(imagePullSecrets): { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, + '#withNodeName':: d.fn(help='"NodeName is a request to schedule this pod onto a specific node. If it is non-empty,\\nthe scheduler simply schedules this pod onto that node, assuming that it fits resource\\nrequirements."', args=[d.arg(name='nodeName', type=d.T.string)]), + withNodeName(nodeName): { spec+: { nodeName: nodeName } }, + '#withNodeSelector':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node.\\nSelector which must match a node's labels for the pod to be scheduled on that node.\\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), + withNodeSelector(nodeSelector): { spec+: { nodeSelector: nodeSelector } }, + '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node.\\nSelector which must match a node's labels for the pod to be scheduled on that node.\\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), + withNodeSelectorMixin(nodeSelector): { spec+: { nodeSelector+: nodeSelector } }, + '#withPorts':: d.fn(help='"List of container ports to expose on the container"', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { spec+: { ports: if std.isArray(v=ports) then ports else [ports] } }, + '#withPortsMixin':: d.fn(help='"List of container ports to expose on the container"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { spec+: { ports+: if std.isArray(v=ports) then ports else [ports] } }, + '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and\\n\\\"system-cluster-critical\\\" are two special keywords which indicate the\\nhighest priorities with the former being the highest priority. Any other\\nname must be defined by creating a PriorityClass object with that name.\\nIf not specified, the pod priority will be default or zero if there is no\\ndefault.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), + withPriorityClassName(priorityClassName): { spec+: { priorityClassName: priorityClassName } }, + '#withReplicas':: d.fn(help='"Number of desired pods. This is a pointer to distinguish between explicit\\nzero and not specified. Defaults to 1.\\nNote: If more than 1 replica is set and leader election is not enabled then\\ncontrollers could conflict. 
Environment variable \\"LEADER_ELECTION\\" can be\\nused to enable leader election process."', args=[d.arg(name='replicas', type=d.T.integer)]), + withReplicas(replicas): { spec+: { replicas: replicas } }, + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\\nto run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.\\nIf unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an\\nempty definition that uses the default runtime handler.\\nMore info: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md\\nThis is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + withRuntimeClassName(runtimeClassName): { spec+: { runtimeClassName: runtimeClassName } }, + '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod.\\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/\\nIf specified, a ServiceAccount named this ServiceAccountName will be used for\\nthe spec.serviceAccountName field in Pods to be created and for the subjects.name field\\nin a ClusterRoleBinding to be created.\\nIf there is no ServiceAccount named this ServiceAccountName, a new ServiceAccount\\nwill be created.\\nIf there is a pre-existing ServiceAccount named this ServiceAccountName, the ServiceAccount\\nwill be used. The annotations in the ControllerConfig will be copied to the ServiceAccount\\nand pre-existing annotations will be kept.\\nRegardless of whether there is a ServiceAccount created by Crossplane or is in place already,\\nthe ServiceAccount will be deleted once the Provider and ControllerConfig are deleted."', args=[d.arg(name='serviceAccountName', type=d.T.string)]), + withServiceAccountName(serviceAccountName): { spec+: { serviceAccountName: serviceAccountName } }, + '#withTolerations':: d.fn(help="\"If specified, the pod's tolerations.\"", args=[d.arg(name='tolerations', type=d.T.array)]), + withTolerations(tolerations): { spec+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, + '#withTolerationsMixin':: d.fn(help="\"If specified, the pod's tolerations.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tolerations', type=d.T.array)]), + withTolerationsMixin(tolerations): { spec+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, + '#withVolumeMounts':: d.fn(help="\"List of VolumeMounts to mount into the container's filesystem.\\nCannot be updated.\"", args=[d.arg(name='volumeMounts', type=d.T.array)]), + withVolumeMounts(volumeMounts): { spec+: { volumeMounts: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] } }, + '#withVolumeMountsMixin':: d.fn(help="\"List of VolumeMounts to mount into the container's filesystem.\\nCannot be updated.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeMounts', type=d.T.array)]), + withVolumeMountsMixin(volumeMounts): { spec+: { volumeMounts+: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] } }, + '#withVolumes':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes"', args=[d.arg(name='volumes', type=d.T.array)]), + withVolumes(volumes): { spec+: 
{ volumes: if std.isArray(v=volumes) then volumes else [volumes] } }, + '#withVolumesMixin':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumes', type=d.T.array)]), + withVolumesMixin(volumes): { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1alpha1/main.libsonnet b/crossplane/1.17/_gen/pkg/v1alpha1/main.libsonnet new file mode 100644 index 0000000..154fb87 --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1alpha1/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + controllerConfig: (import 'controllerConfig.libsonnet'), +} diff --git a/crossplane/1.17/_gen/pkg/v1beta1/deploymentRuntimeConfig.libsonnet b/crossplane/1.17/_gen/pkg/v1beta1/deploymentRuntimeConfig.libsonnet new file mode 100644 index 0000000..e5e464b --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1beta1/deploymentRuntimeConfig.libsonnet @@ -0,0 +1,2977 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='deploymentRuntimeConfig', url='', help='"The DeploymentRuntimeConfig provides settings for the Kubernetes Deployment\\nof a Provider or composition function package.\\n\\n\\nRead the Crossplane documentation for\\n[more information about DeploymentRuntimeConfigs](https://docs.crossplane.io/latest/concepts/providers/#runtime-configuration)."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of DeploymentRuntimeConfig', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1beta1', + kind: 'DeploymentRuntimeConfig', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"DeploymentRuntimeConfigSpec specifies the configuration for a packaged controller.\\nValues provided will override package manager defaults. 
Labels and\\nannotations are passed to both the controller Deployment and ServiceAccount."'), + spec: { + '#deploymentTemplate':: d.obj(help='"DeploymentTemplate is the template for the Deployment object."'), + deploymentTemplate: { + '#metadata':: d.obj(help='"Metadata contains the configurable metadata fields for the Deployment."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that\\nmay be set by external tools to store and retrieve arbitrary metadata.\\nThey are not queryable and should be preserved when modifying objects.\\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { spec+: { deploymentTemplate+: { metadata+: { annotations: annotations } } } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that\\nmay be set by external tools to store and retrieve arbitrary metadata.\\nThey are not queryable and should be preserved when modifying objects.\\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { spec+: { deploymentTemplate+: { metadata+: { annotations+: annotations } } } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. Labels will be merged with internal labels\\nused by crossplane, and labels with a crossplane.io key might be\\noverwritten.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { spec+: { deploymentTemplate+: { metadata+: { labels: labels } } } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. Labels will be merged with internal labels\\nused by crossplane, and labels with a crossplane.io key might be\\noverwritten.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { spec+: { deploymentTemplate+: { metadata+: { labels+: labels } } } }, + '#withName':: d.fn(help='"Name is the name of the object."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { deploymentTemplate+: { metadata+: { name: name } } } }, + }, + '#spec':: d.obj(help='"Spec contains the configurable spec fields for the Deployment object."'), + spec: { + '#selector':: d.obj(help="\"Label selector for pods. Existing ReplicaSets whose pods are\\nselected by this will be the ones affected by this deployment.\\nIt must match the pod template's labels.\""), + selector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { deploymentTemplate+: { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { deploymentTemplate+: { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { deploymentTemplate+: { spec+: { selector+: { matchLabels: matchLabels } } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { deploymentTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } } }, + }, + '#strategy':: d.obj(help='"The deployment strategy to use to replace existing pods with new ones."'), + strategy: { + '#rollingUpdate':: d.obj(help='"Rolling update config params. 
Present only if DeploymentStrategyType =\\nRollingUpdate.\\n---\\nTODO: Update this to follow our convention for oneOf, whatever we decide it\\nto be."'), + rollingUpdate: { + '#withMaxSurge':: d.fn(help='"The maximum number of pods that can be scheduled above the desired number of\\npods.\\nValue can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).\\nThis can not be 0 if MaxUnavailable is 0.\\nAbsolute number is calculated from percentage by rounding up.\\nDefaults to 25%.\\nExample: when this is set to 30%, the new ReplicaSet can be scaled up immediately when\\nthe rolling update starts, such that the total number of old and new pods do not exceed\\n130% of desired pods. Once old pods have been killed,\\nnew ReplicaSet can be scaled up further, ensuring that total number of pods running\\nat any time during the update is at most 130% of desired pods."', args=[d.arg(name='maxSurge', type=d.T.any)]), + withMaxSurge(maxSurge): { spec+: { deploymentTemplate+: { spec+: { strategy+: { rollingUpdate+: { maxSurge: maxSurge } } } } } }, + '#withMaxUnavailable':: d.fn(help='"The maximum number of pods that can be unavailable during the update.\\nValue can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).\\nAbsolute number is calculated from percentage by rounding down.\\nThis can not be 0 if MaxSurge is 0.\\nDefaults to 25%.\\nExample: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods\\nimmediately when the rolling update starts. Once new pods are ready, old ReplicaSet\\ncan be scaled down further, followed by scaling up the new ReplicaSet, ensuring\\nthat the total number of pods available at all times during the update is at\\nleast 70% of desired pods."', args=[d.arg(name='maxUnavailable', type=d.T.any)]), + withMaxUnavailable(maxUnavailable): { spec+: { deploymentTemplate+: { spec+: { strategy+: { rollingUpdate+: { maxUnavailable: maxUnavailable } } } } } }, + }, + '#withType':: d.fn(help='"Type of deployment. Can be \\"Recreate\\" or \\"RollingUpdate\\". 
Default is RollingUpdate."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { deploymentTemplate+: { spec+: { strategy+: { type: type } } } } }, + }, + '#template':: d.obj(help='"Template describes the pods that will be created.\\nThe only allowed template.spec.restartPolicy value is \\"Always\\"."'), + template: { + '#metadata':: d.obj(help="\"Standard object's metadata.\\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\""), + metadata: { + '#withAnnotations':: d.fn(help='', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { spec+: { deploymentTemplate+: { spec+: { template+: { metadata+: { annotations: annotations } } } } } }, + '#withAnnotationsMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { spec+: { deploymentTemplate+: { spec+: { template+: { metadata+: { annotations+: annotations } } } } } }, + '#withFinalizers':: d.fn(help='', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { spec+: { deploymentTemplate+: { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, + '#withFinalizersMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { spec+: { deploymentTemplate+: { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, + '#withLabels':: d.fn(help='', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { spec+: { deploymentTemplate+: { spec+: { template+: { metadata+: { labels: labels } } } } } }, + '#withLabelsMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { spec+: { deploymentTemplate+: { spec+: { template+: { metadata+: { labels+: labels } } } } } }, + '#withName':: d.fn(help='', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { deploymentTemplate+: { spec+: { template+: { metadata+: { name: name } } } } } }, + '#withNamespace':: d.fn(help='', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { deploymentTemplate+: { spec+: { template+: { metadata+: { namespace: namespace } } } } } }, + }, + '#spec':: d.obj(help='"Specification of the desired behavior of the pod.\\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status"'), + spec: { + '#affinity':: d.obj(help="\"If specified, the pod's scheduling constraints\""), + affinity: { + '#nodeAffinity':: d.obj(help='"Describes node affinity scheduling rules for the pod."'), + nodeAffinity: { + '#preferredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. 
The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node matches the corresponding matchExpressions; the\\nnode(s) with the highest sum are the most preferred."'), + preferredDuringSchedulingIgnoredDuringExecution: { + '#preference':: d.obj(help='"A node selector term, associated with the corresponding weight."'), + preference: { + '#matchExpressions':: d.obj(help="\"A list of node selector requirements by node's labels.\""), + matchExpressions: { + '#withKey':: d.fn(help='"The label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#matchFields':: d.obj(help="\"A list of node selector requirements by node's fields.\""), + matchFields: { + '#withKey':: d.fn(help='"The label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. 
If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help="\"A list of node selector requirements by node's labels.\"", args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { preference+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help="\"A list of node selector requirements by node's labels.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { preference+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchFields':: d.fn(help="\"A list of node selector requirements by node's fields.\"", args=[d.arg(name='matchFields', type=d.T.array)]), + withMatchFields(matchFields): { preference+: { matchFields: if std.isArray(v=matchFields) then matchFields else [matchFields] } }, + '#withMatchFieldsMixin':: d.fn(help="\"A list of node selector requirements by node's fields.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchFields', type=d.T.array)]), + withMatchFieldsMixin(matchFields): { preference+: { matchFields+: if std.isArray(v=matchFields) then matchFields else [matchFields] } }, + }, + '#withWeight':: d.fn(help='"Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100."', args=[d.arg(name='weight', type=d.T.integer)]), + withWeight(weight): { weight: weight }, + }, + '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"If the affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to an update), the system\\nmay or may not try to eventually evict the pod from its node."'), + requiredDuringSchedulingIgnoredDuringExecution: { + '#nodeSelectorTerms':: d.obj(help='"Required. A list of node selector terms. The terms are ORed."'), + nodeSelectorTerms: { + '#matchExpressions':: d.obj(help="\"A list of node selector requirements by node's labels.\""), + matchExpressions: { + '#withKey':: d.fn(help='"The label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. 
If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#matchFields':: d.obj(help="\"A list of node selector requirements by node's fields.\""), + matchFields: { + '#withKey':: d.fn(help='"The label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"An array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. 
If the operator is Gt or Lt, the values\\narray must have a single element, which will be interpreted as an integer.\\nThis array is replaced during a strategic merge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help="\"A list of node selector requirements by node's labels.\"", args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] }, + '#withMatchExpressionsMixin':: d.fn(help="\"A list of node selector requirements by node's labels.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] }, + '#withMatchFields':: d.fn(help="\"A list of node selector requirements by node's fields.\"", args=[d.arg(name='matchFields', type=d.T.array)]), + withMatchFields(matchFields): { matchFields: if std.isArray(v=matchFields) then matchFields else [matchFields] }, + '#withMatchFieldsMixin':: d.fn(help="\"A list of node selector requirements by node's fields.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchFields', type=d.T.array)]), + withMatchFieldsMixin(matchFields): { matchFields+: if std.isArray(v=matchFields) then matchFields else [matchFields] }, + }, + '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTerms(nodeSelectorTerms): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } } }, + '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } } }, + }, + '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. 
The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node matches the corresponding matchExpressions; the\\nnode(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node matches the corresponding matchExpressions; the\\nnode(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + }, + '#podAffinity':: d.obj(help='"Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s))."'), + podAffinity: { + '#preferredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."'), + preferredDuringSchedulingIgnoredDuringExecution: { + '#podAffinityTerm':: d.obj(help='"Required. 
A pod affinity term, associated with the corresponding weight."'), + podAffinityTerm: { + '#labelSelector':: d.obj(help="\"A label query over a set of resources, in this case pods.\\nIf it's null, this PodAffinityTerm matches with no Pods.\""), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podAffinityTerm+: { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podAffinityTerm+: { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podAffinityTerm+: { labelSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { labelSelector+: { matchLabels+: matchLabels } } }, + }, + '#namespaceSelector':: d.obj(help="\"A label query over the set of namespaces that the term applies to.\\nThe term is applied to the union of the namespaces selected by this field\\nand the ones listed in the namespaces field.\\nnull selector and null or empty namespaces list means \\\"this pod's namespace\\\".\\nAn empty selector ({}) matches all namespaces.\""), + namespaceSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podAffinityTerm+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podAffinityTerm+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. 
The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { podAffinityTerm+: { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { podAffinityTerm+: { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, + '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\\nwhose value of the label with key topologyKey matches that of any node on which any of the\\nselected pods is running.\\nEmpty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { podAffinityTerm+: { topologyKey: topologyKey } }, + }, + '#withWeight':: d.fn(help='"weight associated with matching the corresponding podAffinityTerm,\\nin the range 1-100."', args=[d.arg(name='weight', type=d.T.integer)]), + withWeight(weight): { weight: weight }, + }, + '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"If the affinity requirements specified by this field are 
not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. all terms must be satisfied."'), + requiredDuringSchedulingIgnoredDuringExecution: { + '#labelSelector':: d.obj(help="\"A label query over a set of resources, in this case pods.\\nIf it's null, this PodAffinityTerm matches with no Pods.\""), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, + }, + '#namespaceSelector':: d.obj(help="\"A label query over the set of namespaces that the term applies to.\\nThe term is applied to the union of the namespaces selected by this field\\nand the ones listed in the namespaces field.\\nnull selector and null or empty namespaces list means \\\"this pod's namespace\\\".\\nAn empty selector ({}) matches all namespaces.\""), + namespaceSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. 
The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\\nwhose value of the label with key topologyKey matches that of any node on which any of the\\nselected pods is running.\\nEmpty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { topologyKey: topologyKey }, + }, + '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. 
The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. 
all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + }, + '#podAntiAffinity':: d.obj(help='"Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s))."'), + podAntiAffinity: { + '#preferredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe anti-affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."'), + preferredDuringSchedulingIgnoredDuringExecution: { + '#podAffinityTerm':: d.obj(help='"Required. A pod affinity term, associated with the corresponding weight."'), + podAffinityTerm: { + '#labelSelector':: d.obj(help="\"A label query over a set of resources, in this case pods.\\nIf it's null, this PodAffinityTerm matches with no Pods.\""), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podAffinityTerm+: { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podAffinityTerm+: { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podAffinityTerm+: { labelSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { labelSelector+: { matchLabels+: matchLabels } } }, + }, + '#namespaceSelector':: d.obj(help="\"A label query over the set of namespaces that the term applies to.\\nThe term is applied to the union of the namespaces selected by this field\\nand the ones listed in the namespaces field.\\nnull selector and null or empty namespaces list means \\\"this pod's namespace\\\".\\nAn empty selector ({}) matches all namespaces.\""), + namespaceSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podAffinityTerm+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podAffinityTerm+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. 
The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { podAffinityTerm+: { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { podAffinityTerm+: { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, + '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\\nwhose value of the label with key topologyKey matches that of any node on which any of the\\nselected pods is running.\\nEmpty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { podAffinityTerm+: { topologyKey: topologyKey } }, + }, + '#withWeight':: d.fn(help='"weight associated with matching the corresponding podAffinityTerm,\\nin the range 1-100."', args=[d.arg(name='weight', type=d.T.integer)]), + withWeight(weight): { weight: weight }, + }, + '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"If the anti-affinity requirements specified by this field 
are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the anti-affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. all terms must be satisfied."'), + requiredDuringSchedulingIgnoredDuringExecution: { + '#labelSelector':: d.obj(help="\"A label query over a set of resources, in this case pods.\\nIf it's null, this PodAffinityTerm matches with no Pods.\""), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, + }, + '#namespaceSelector':: d.obj(help="\"A label query over the set of namespaces that the term applies to.\\nThe term is applied to the union of the namespaces selected by this field\\nand the ones listed in the namespaces field.\\nnull selector and null or empty namespaces list means \\\"this pod's namespace\\\".\\nAn empty selector ({}) matches all namespaces.\""), + namespaceSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. 
The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will\\nbe taken into consideration. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\\nto select the group of existing pods which pods will be taken into consideration\\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\\npod labels will be ignored. The default value is empty.\\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\\nwhose value of the label with key topologyKey matches that of any node on which any of the\\nselected pods is running.\\nEmpty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { topologyKey: topologyKey }, + }, + '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe anti-affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. 
The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy\\nthe anti-affinity expressions specified by this field, but it may choose\\na node that violates one or more of the expressions. The node that is\\nmost preferred is the one with the greatest sum of weights, i.e.\\nfor each node that meets all of the scheduling requirements (resource\\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\\ncompute a sum by iterating through the elements of this field and adding\\n\\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\\nnode(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the anti-affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. 
all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at\\nscheduling time, the pod will not be scheduled onto the node.\\nIf the anti-affinity requirements specified by this field cease to be met\\nat some point during pod execution (e.g. due to a pod label update), the\\nsystem may or may not try to eventually evict the pod from its node.\\nWhen there are multiple elements, the lists of nodes corresponding to each\\npodAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), + withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, + }, + }, + '#containers':: d.obj(help='"List of containers belonging to the pod.\\nContainers cannot currently be added or removed.\\nThere must be at least one container in a Pod.\\nCannot be updated."'), + containers: { + '#env':: d.obj(help='"List of environment variables to set in the container.\\nCannot be updated."'), + env: { + '#valueFrom':: d.obj(help="\"Source for the environment variable's value. Cannot be used if value is not empty.\""), + valueFrom: { + '#configMapKeyRef':: d.obj(help='"Selects a key of a ConfigMap."'), + configMapKeyRef: { + '#withKey':: d.fn(help='"The key to select."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { valueFrom+: { configMapKeyRef+: { key: key } } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { valueFrom+: { configMapKeyRef+: { name: name } } }, + '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { valueFrom+: { configMapKeyRef+: { optional: optional } } }, + }, + '#fieldRef':: d.obj(help="\"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`,\\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.\""), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { valueFrom+: { fieldRef+: { apiVersion: apiVersion } } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { valueFrom+: { fieldRef+: { fieldPath: fieldPath } } }, + }, + '#resourceFieldRef':: d.obj(help='"Selects a resource of the container: only resources limits and requests\\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported."'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { valueFrom+: { resourceFieldRef+: { containerName: containerName } } }, + '#withDivisor':: d.fn(help='"Specifies the output format of the exposed resources, defaults to \\"1\\', args=[d.arg(name='divisor', type=d.T.any)]), + withDivisor(divisor): { valueFrom+: { resourceFieldRef+: { divisor: divisor } } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { valueFrom+: { resourceFieldRef+: { resource: resource } } }, + }, + '#secretKeyRef':: d.obj(help="\"Selects a key of a secret in the pod's namespace\""), + secretKeyRef: { + '#withKey':: d.fn(help='"The key of the secret to select from. Must be a valid secret key."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { valueFrom+: { secretKeyRef+: { key: key } } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { valueFrom+: { secretKeyRef+: { name: name } } }, + '#withOptional':: d.fn(help='"Specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { valueFrom+: { secretKeyRef+: { optional: optional } } }, + }, + }, + '#withName':: d.fn(help='"Name of the environment variable. Must be a C_IDENTIFIER."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"Variable references $(VAR_NAME) are expanded\\nusing the previously defined environment variables in the container and\\nany service environment variables. If a variable cannot be resolved,\\nthe reference in the input string will be unchanged. 
Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\\n\\"$$(VAR_NAME)\\" will produce the string literal \\"$(VAR_NAME)\\".\\nEscaped references will never be expanded, regardless of whether the variable\\nexists or not.\\nDefaults to \\"\\"."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#envFrom':: d.obj(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."'), + envFrom: { + '#configMapRef':: d.obj(help='"The ConfigMap to select from"'), + configMapRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMapRef+: { name: name } }, + '#withOptional':: d.fn(help='"Specify whether the ConfigMap must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMapRef+: { optional: optional } }, + }, + '#secretRef':: d.obj(help='"The Secret to select from"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withOptional':: d.fn(help='"Specify whether the Secret must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secretRef+: { optional: optional } }, + }, + '#withPrefix':: d.fn(help='"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER."', args=[d.arg(name='prefix', type=d.T.string)]), + withPrefix(prefix): { prefix: prefix }, + }, + '#lifecycle':: d.obj(help='"Actions that the management system should take in response to container lifecycle events.\\nCannot be updated."'), + lifecycle: { + '#postStart':: d.obj(help='"PostStart is called immediately after a container is created. If the handler fails,\\nthe container is terminated and restarted according to its restart policy.\\nOther management of the container blocks until the hook completes.\\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks"'), + postStart: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { lifecycle+: { postStart+: { exec+: { command: if std.isArray(v=command) then command else [command] } } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { lifecycle+: { postStart+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { postStart+: { httpGet+: { host: host } } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { lifecycle+: { postStart+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { lifecycle+: { postStart+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { lifecycle+: { postStart+: { httpGet+: { path: path } } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { postStart+: { httpGet+: { port: port } } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { lifecycle+: { postStart+: { httpGet+: { scheme: scheme } } } }, + }, + '#sleep':: d.obj(help='"Sleep represents the duration that the container should sleep before being terminated."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { postStart+: { sleep+: { seconds: seconds } } } }, + }, + '#tcpSocket':: d.obj(help='"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\\nfor the backward compatibility. There are no validation of this field and\\nlifecycle hooks will fail in runtime when tcp handler is specified."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { postStart+: { tcpSocket+: { host: host } } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { postStart+: { tcpSocket+: { port: port } } } }, + }, + }, + '#preStop':: d.obj(help="\"PreStop is called immediately before a container is terminated due to an\\nAPI request or management event such as liveness/startup probe failure,\\npreemption, resource contention, etc. The handler is not called if the\\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\\nPreStop hook is executed. Regardless of the outcome of the handler, the\\ncontainer will eventually terminate within the Pod's termination grace\\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\\nor until the termination grace period is reached.\\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\""), + preStop: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { lifecycle+: { preStop+: { exec+: { command: if std.isArray(v=command) then command else [command] } } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { lifecycle+: { preStop+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { preStop+: { httpGet+: { host: host } } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { lifecycle+: { preStop+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { lifecycle+: { preStop+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { lifecycle+: { preStop+: { httpGet+: { path: path } } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { preStop+: { httpGet+: { port: port } } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { lifecycle+: { preStop+: { httpGet+: { scheme: scheme } } } }, + }, + '#sleep':: d.obj(help='"Sleep represents the duration that the container should sleep before being terminated."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { preStop+: { sleep+: { seconds: seconds } } } }, + }, + '#tcpSocket':: d.obj(help='"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\\nfor the backward compatibility. There are no validation of this field and\\nlifecycle hooks will fail in runtime when tcp handler is specified."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { preStop+: { tcpSocket+: { host: host } } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { preStop+: { tcpSocket+: { port: port } } } }, + }, + }, + }, + '#livenessProbe':: d.obj(help='"Periodic probe of container liveness.\\nContainer will be restarted if the probe fails.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"'), + livenessProbe: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { livenessProbe+: { exec+: { command: if std.isArray(v=command) then command else [command] } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { livenessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, + }, + '#grpc':: d.obj(help='"GRPC specifies an action involving a GRPC port."'), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { livenessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { livenessProbe+: { grpc+: { service: service } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { livenessProbe+: { httpGet+: { host: host } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { livenessProbe+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { livenessProbe+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { livenessProbe+: { httpGet+: { path: path } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { livenessProbe+: { httpGet+: { port: port } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { livenessProbe+: { httpGet+: { scheme: scheme } } }, + }, + '#tcpSocket':: d.obj(help='"TCPSocket specifies an action involving a TCP port."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { livenessProbe+: { tcpSocket+: { host: host } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { livenessProbe+: { tcpSocket+: { port: port } } }, + }, + '#withFailureThreshold':: d.fn(help='"Minimum consecutive failures for the probe to be considered failed after having succeeded.\\nDefaults to 3. Minimum value is 1."', args=[d.arg(name='failureThreshold', type=d.T.integer)]), + withFailureThreshold(failureThreshold): { livenessProbe+: { failureThreshold: failureThreshold } }, + '#withInitialDelaySeconds':: d.fn(help='"Number of seconds after the container has started before liveness probes are initiated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='initialDelaySeconds', type=d.T.integer)]), + withInitialDelaySeconds(initialDelaySeconds): { livenessProbe+: { initialDelaySeconds: initialDelaySeconds } }, + '#withPeriodSeconds':: d.fn(help='"How often (in seconds) to perform the probe.\\nDefault to 10 seconds. Minimum value is 1."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + withPeriodSeconds(periodSeconds): { livenessProbe+: { periodSeconds: periodSeconds } }, + '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed.\\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), + withSuccessThreshold(successThreshold): { livenessProbe+: { successThreshold: successThreshold } }, + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\\nvalue overrides the value provided by the pod spec.\\nValue must be non-negative integer. 
The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { livenessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, + '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out.\\nDefaults to 1 second. Minimum value is 1.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), + withTimeoutSeconds(timeoutSeconds): { livenessProbe+: { timeoutSeconds: timeoutSeconds } }, + }, + '#ports':: d.obj(help='"List of ports to expose from the container. Not specifying a port here\\nDOES NOT prevent that port from being exposed. Any port which is\\nlistening on the default \\"0.0.0.0\\" address inside a container will be\\naccessible from the network.\\nModifying this array with strategic merge patch may corrupt the data.\\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\\nCannot be updated."'), + ports: { + '#withContainerPort':: d.fn(help="\"Number of port to expose on the pod's IP address.\\nThis must be a valid port number, 0 \u003c x \u003c 65536.\"", args=[d.arg(name='containerPort', type=d.T.integer)]), + withContainerPort(containerPort): { containerPort: containerPort }, + '#withHostIP':: d.fn(help='"What host IP to bind the external port to."', args=[d.arg(name='hostIP', type=d.T.string)]), + withHostIP(hostIP): { hostIP: hostIP }, + '#withHostPort':: d.fn(help='"Number of port to expose on the host.\\nIf specified, this must be a valid port number, 0 < x < 65536.\\nIf HostNetwork is specified, this must match ContainerPort.\\nMost containers do not need this."', args=[d.arg(name='hostPort', type=d.T.integer)]), + withHostPort(hostPort): { hostPort: hostPort }, + '#withName':: d.fn(help='"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\\nnamed port in a pod must have a unique name. Name for the port that can be\\nreferred to by services."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withProtocol':: d.fn(help='"Protocol for port. Must be UDP, TCP, or SCTP.\\nDefaults to \\"TCP\\"."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + }, + '#readinessProbe':: d.obj(help='"Periodic probe of container service readiness.\\nContainer will be removed from service endpoints if the probe fails.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"'), + readinessProbe: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { readinessProbe+: { exec+: { command: if std.isArray(v=command) then command else [command] } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { readinessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, + }, + '#grpc':: d.obj(help='"GRPC specifies an action involving a GRPC port."'), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { readinessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { readinessProbe+: { grpc+: { service: service } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { readinessProbe+: { httpGet+: { host: host } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { readinessProbe+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { readinessProbe+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { readinessProbe+: { httpGet+: { path: path } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { readinessProbe+: { httpGet+: { port: port } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { readinessProbe+: { httpGet+: { scheme: scheme } } }, + }, + '#tcpSocket':: d.obj(help='"TCPSocket specifies an action involving a TCP port."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { readinessProbe+: { tcpSocket+: { host: host } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { readinessProbe+: { tcpSocket+: { port: port } } }, + }, + '#withFailureThreshold':: d.fn(help='"Minimum consecutive failures for the probe to be considered failed after having succeeded.\\nDefaults to 3. Minimum value is 1."', args=[d.arg(name='failureThreshold', type=d.T.integer)]), + withFailureThreshold(failureThreshold): { readinessProbe+: { failureThreshold: failureThreshold } }, + '#withInitialDelaySeconds':: d.fn(help='"Number of seconds after the container has started before liveness probes are initiated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='initialDelaySeconds', type=d.T.integer)]), + withInitialDelaySeconds(initialDelaySeconds): { readinessProbe+: { initialDelaySeconds: initialDelaySeconds } }, + '#withPeriodSeconds':: d.fn(help='"How often (in seconds) to perform the probe.\\nDefault to 10 seconds. Minimum value is 1."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + withPeriodSeconds(periodSeconds): { readinessProbe+: { periodSeconds: periodSeconds } }, + '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed.\\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), + withSuccessThreshold(successThreshold): { readinessProbe+: { successThreshold: successThreshold } }, + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\\nvalue overrides the value provided by the pod spec.\\nValue must be non-negative integer. 
The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { readinessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, + '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out.\\nDefaults to 1 second. Minimum value is 1.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), + withTimeoutSeconds(timeoutSeconds): { readinessProbe+: { timeoutSeconds: timeoutSeconds } }, + }, + '#resizePolicy':: d.obj(help='"Resources resize policy for the container."'), + resizePolicy: { + '#withResourceName':: d.fn(help='"Name of the resource to which this resource resize policy applies.\\nSupported values: cpu, memory."', args=[d.arg(name='resourceName', type=d.T.string)]), + withResourceName(resourceName): { resourceName: resourceName }, + '#withRestartPolicy':: d.fn(help='"Restart policy to apply when specified resource is resized.\\nIf not specified, it defaults to NotRequired."', args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, + }, + '#resources':: d.obj(help='"Compute Resources required by this container.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"'), + resources: { + '#claims':: d.obj(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. It can only be set for containers."'), + claims: { + '#withName':: d.fn(help='"Name must match the name of one entry in pod.spec.resourceClaims of\\nthe Pod where this field is used. It makes that resource available\\ninside a container."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. 
It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } }, + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { resources+: { limits: limits } }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { resources+: { limits+: limits } }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { resources+: { requests: requests } }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { resources+: { requests+: requests } }, + }, + '#securityContext':: d.obj(help='"SecurityContext defines the security options the container should be run with.\\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/"'), + securityContext: { + '#appArmorProfile':: d.obj(help="\"appArmorProfile is the AppArmor options to use by this container. 
If set, this profile\\noverrides the pod's appArmorProfile.\\nNote that this field cannot be set when spec.os.name is windows.\""), + appArmorProfile: { + '#withLocalhostProfile':: d.fn(help='"localhostProfile indicates a profile loaded on the node that should be used.\\nThe profile must be preconfigured on the node to work.\\nMust match the loaded name of the profile.\\nMust be set if and only if type is \\"Localhost\\"."', args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { securityContext+: { appArmorProfile+: { localhostProfile: localhostProfile } } }, + '#withType':: d.fn(help="\"type indicates which kind of AppArmor profile will be applied.\\nValid options are:\\n Localhost - a profile pre-loaded on the node.\\n RuntimeDefault - the container runtime's default profile.\\n Unconfined - no AppArmor enforcement.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { securityContext+: { appArmorProfile+: { type: type } } }, + }, + '#capabilities':: d.obj(help='"The capabilities to add/drop when running containers.\\nDefaults to the default set of capabilities granted by the container runtime.\\nNote that this field cannot be set when spec.os.name is windows."'), + capabilities: { + '#withAdd':: d.fn(help='"Added capabilities"', args=[d.arg(name='add', type=d.T.array)]), + withAdd(add): { securityContext+: { capabilities+: { add: if std.isArray(v=add) then add else [add] } } }, + '#withAddMixin':: d.fn(help='"Added capabilities"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='add', type=d.T.array)]), + withAddMixin(add): { securityContext+: { capabilities+: { add+: if std.isArray(v=add) then add else [add] } } }, + '#withDrop':: d.fn(help='"Removed capabilities"', args=[d.arg(name='drop', type=d.T.array)]), + withDrop(drop): { securityContext+: { capabilities+: { drop: if std.isArray(v=drop) then drop else [drop] } } }, + '#withDropMixin':: d.fn(help='"Removed capabilities"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='drop', type=d.T.array)]), + withDropMixin(drop): { securityContext+: { capabilities+: { drop+: if std.isArray(v=drop) then drop else [drop] } } }, + }, + '#seLinuxOptions':: d.obj(help='"The SELinux context to be applied to the container.\\nIf unspecified, the container runtime will allocate a random SELinux context for each\\ncontainer. May also be set in PodSecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."'), + seLinuxOptions: { + '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), + withLevel(level): { securityContext+: { seLinuxOptions+: { level: level } } }, + '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), + withRole(role): { securityContext+: { seLinuxOptions+: { role: role } } }, + '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { securityContext+: { seLinuxOptions+: { type: type } } }, + '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { securityContext+: { seLinuxOptions+: { user: user } } }, + }, + '#seccompProfile':: d.obj(help='"The seccomp options to use by this container. If seccomp options are\\nprovided at both the pod & container level, the container options\\noverride the pod options.\\nNote that this field cannot be set when spec.os.name is windows."'), + seccompProfile: { + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used.\\nThe profile must be preconfigured on the node to work.\\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\\nMust be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } }, + '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied.\\nValid options are:\\n\\n\\nLocalhost - a profile defined in a file on the node should be used.\\nRuntimeDefault - the container runtime default profile should be used.\\nUnconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { securityContext+: { seccompProfile+: { type: type } } }, + }, + '#windowsOptions':: d.obj(help='"The Windows specific settings applied to all containers.\\nIf unspecified, the options from the PodSecurityContext will be used.\\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is linux."'), + windowsOptions: { + '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook\\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\\nGMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), + withGmsaCredentialSpec(gmsaCredentialSpec): { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } }, + '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), + withGmsaCredentialSpecName(gmsaCredentialSpecName): { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be 
run as a 'Host Process' container.\\nAll of a Pod's containers must have the same effective HostProcess value\\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\\nIn addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } }, + '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process.\\nDefaults to the user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), + withRunAsUserName(runAsUserName): { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } }, + }, + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more\\nprivileges than its parent process. This bool directly controls if\\nthe no_new_privs flag will be set on the container process.\\nAllowPrivilegeEscalation is true always when the container is:\\n1) run as Privileged\\n2) has CAP_SYS_ADMIN\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + withAllowPrivilegeEscalation(allowPrivilegeEscalation): { securityContext+: { allowPrivilegeEscalation: allowPrivilegeEscalation } }, + '#withPrivileged':: d.fn(help='"Run container in privileged mode.\\nProcesses in privileged containers are essentially equivalent to root on the host.\\nDefaults to false.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), + withPrivileged(privileged): { securityContext+: { privileged: privileged } }, + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers.\\nThe default is DefaultProcMount which uses the container runtime defaults for\\nreadonly paths and masked paths.\\nThis requires the ProcMountType feature flag to be enabled.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), + withProcMount(procMount): { securityContext+: { procMount: procMount } }, + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem.\\nDefault is false.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + withReadOnlyRootFilesystem(readOnlyRootFilesystem): { securityContext+: { readOnlyRootFilesystem: readOnlyRootFilesystem } }, + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + withRunAsGroup(runAsGroup): { securityContext+: { runAsGroup: runAsGroup } }, + '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user.\\nIf true, the Kubelet will validate the image at runtime to ensure that it\\ndoes not run as UID 0 (root) and fail to start the container if it does.\\nIf unset or false, no such validation will be performed.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), + withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } }, + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + withRunAsUser(runAsUser): { securityContext+: { runAsUser: runAsUser } }, + }, + '#startupProbe':: d.obj(help="\"StartupProbe indicates that the Pod has successfully initialized.\\nIf specified, no other probes are executed until this completes successfully.\\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\\nThis cannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\""), + startupProbe: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { startupProbe+: { exec+: { command: if std.isArray(v=command) then command else [command] } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { startupProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, + }, + '#grpc':: d.obj(help='"GRPC specifies an action involving a GRPC port."'), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. 
Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { startupProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { startupProbe+: { grpc+: { service: service } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { startupProbe+: { httpGet+: { host: host } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { startupProbe+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { startupProbe+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { startupProbe+: { httpGet+: { path: path } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { startupProbe+: { httpGet+: { port: port } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { startupProbe+: { httpGet+: { scheme: scheme } } }, + }, + '#tcpSocket':: d.obj(help='"TCPSocket specifies an action involving a TCP port."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { startupProbe+: { tcpSocket+: { host: host } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { startupProbe+: { tcpSocket+: { port: port } } }, + }, + '#withFailureThreshold':: d.fn(help='"Minimum consecutive failures for the probe to be considered failed after having succeeded.\\nDefaults to 3. 
Minimum value is 1."', args=[d.arg(name='failureThreshold', type=d.T.integer)]), + withFailureThreshold(failureThreshold): { startupProbe+: { failureThreshold: failureThreshold } }, + '#withInitialDelaySeconds':: d.fn(help='"Number of seconds after the container has started before liveness probes are initiated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='initialDelaySeconds', type=d.T.integer)]), + withInitialDelaySeconds(initialDelaySeconds): { startupProbe+: { initialDelaySeconds: initialDelaySeconds } }, + '#withPeriodSeconds':: d.fn(help='"How often (in seconds) to perform the probe.\\nDefault to 10 seconds. Minimum value is 1."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + withPeriodSeconds(periodSeconds): { startupProbe+: { periodSeconds: periodSeconds } }, + '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed.\\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), + withSuccessThreshold(successThreshold): { startupProbe+: { successThreshold: successThreshold } }, + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\\nvalue overrides the value provided by the pod spec.\\nValue must be non-negative integer. The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { startupProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, + '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out.\\nDefaults to 1 second. Minimum value is 1.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), + withTimeoutSeconds(timeoutSeconds): { startupProbe+: { timeoutSeconds: timeoutSeconds } }, + }, + '#volumeDevices':: d.obj(help='"volumeDevices is the list of block devices to be used by the container."'), + volumeDevices: { + '#withDevicePath':: d.fn(help='"devicePath is the path inside of the container that the device will be mapped to."', args=[d.arg(name='devicePath', type=d.T.string)]), + withDevicePath(devicePath): { devicePath: devicePath }, + '#withName':: d.fn(help='"name must match the name of a persistentVolumeClaim in the pod"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#volumeMounts':: d.obj(help="\"Pod volumes to mount into the container's filesystem.\\nCannot be updated.\""), + volumeMounts: { + '#withMountPath':: d.fn(help="\"Path within the container at which the volume should be mounted. 
Must\\nnot contain ':'.\"", args=[d.arg(name='mountPath', type=d.T.string)]), + withMountPath(mountPath): { mountPath: mountPath }, + '#withMountPropagation':: d.fn(help='"mountPropagation determines how mounts are propagated from the host\\nto container and the other way around.\\nWhen not set, MountPropagationNone is used.\\nThis field is beta in 1.10.\\nWhen RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified\\n(which defaults to None)."', args=[d.arg(name='mountPropagation', type=d.T.string)]), + withMountPropagation(mountPropagation): { mountPropagation: mountPropagation }, + '#withName':: d.fn(help='"This must match the Name of a Volume."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withReadOnly':: d.fn(help='"Mounted read-only if true, read-write otherwise (false or unspecified).\\nDefaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withRecursiveReadOnly':: d.fn(help='"RecursiveReadOnly specifies whether read-only mounts should be handled\\nrecursively.\\n\\n\\nIf ReadOnly is false, this field has no meaning and must be unspecified.\\n\\n\\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made\\nrecursively read-only. If this field is set to IfPossible, the mount is made\\nrecursively read-only, if it is supported by the container runtime. If this\\nfield is set to Enabled, the mount is made recursively read-only if it is\\nsupported by the container runtime, otherwise the pod will not be started and\\nan error will be generated to indicate the reason.\\n\\n\\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to\\nNone (or be unspecified, which defaults to None).\\n\\n\\nIf this field is not specified, it is treated as an equivalent of Disabled."', args=[d.arg(name='recursiveReadOnly', type=d.T.string)]), + withRecursiveReadOnly(recursiveReadOnly): { recursiveReadOnly: recursiveReadOnly }, + '#withSubPath':: d.fn(help="\"Path within the volume from which the container's volume should be mounted.\\nDefaults to \\\"\\\" (volume's root).\"", args=[d.arg(name='subPath', type=d.T.string)]), + withSubPath(subPath): { subPath: subPath }, + '#withSubPathExpr':: d.fn(help="\"Expanded path within the volume from which the container's volume should be mounted.\\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\\nDefaults to \\\"\\\" (volume's root).\\nSubPathExpr and SubPath are mutually exclusive.\"", args=[d.arg(name='subPathExpr', type=d.T.string)]), + withSubPathExpr(subPathExpr): { subPathExpr: subPathExpr }, + }, + '#withArgs':: d.fn(help="\"Arguments to the entrypoint.\\nThe container image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. 
Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), + withArgs(args): { args: if std.isArray(v=args) then args else [args] }, + '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint.\\nThe container image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), + withArgsMixin(args): { args+: if std.isArray(v=args) then args else [args] }, + '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell.\\nThe container image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { command: if std.isArray(v=command) then command else [command] }, + '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell.\\nThe container image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { command+: if std.isArray(v=command) then command else [command] }, + '#withEnv':: d.fn(help='"List of environment variables to set in the container.\\nCannot be updated."', args=[d.arg(name='env', type=d.T.array)]), + withEnv(env): { env: if std.isArray(v=env) then env else [env] }, + '#withEnvFrom':: d.fn(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. 
When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."', args=[d.arg(name='envFrom', type=d.T.array)]), + withEnvFrom(envFrom): { envFrom: if std.isArray(v=envFrom) then envFrom else [envFrom] }, + '#withEnvFromMixin':: d.fn(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='envFrom', type=d.T.array)]), + withEnvFromMixin(envFrom): { envFrom+: if std.isArray(v=envFrom) then envFrom else [envFrom] }, + '#withEnvMixin':: d.fn(help='"List of environment variables to set in the container.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='env', type=d.T.array)]), + withEnvMixin(env): { env+: if std.isArray(v=env) then env else [env] }, + '#withImage':: d.fn(help='"Container image name.\\nMore info: https://kubernetes.io/docs/concepts/containers/images\\nThis field is optional to allow higher level config management to default or override\\ncontainer images in workload controllers like Deployments and StatefulSets."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { image: image }, + '#withImagePullPolicy':: d.fn(help='"Image pull policy.\\nOne of Always, Never, IfNotPresent.\\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images"', args=[d.arg(name='imagePullPolicy', type=d.T.string)]), + withImagePullPolicy(imagePullPolicy): { imagePullPolicy: imagePullPolicy }, + '#withName':: d.fn(help='"Name of the container specified as a DNS_LABEL.\\nEach container in a pod must have a unique name (DNS_LABEL).\\nCannot be updated."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPorts':: d.fn(help='"List of ports to expose from the container. Not specifying a port here\\nDOES NOT prevent that port from being exposed. Any port which is\\nlistening on the default \\"0.0.0.0\\" address inside a container will be\\naccessible from the network.\\nModifying this array with strategic merge patch may corrupt the data.\\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\\nCannot be updated."', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, + '#withPortsMixin':: d.fn(help='"List of ports to expose from the container. Not specifying a port here\\nDOES NOT prevent that port from being exposed. 
Any port which is\\nlistening on the default \\"0.0.0.0\\" address inside a container will be\\naccessible from the network.\\nModifying this array with strategic merge patch may corrupt the data.\\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#withResizePolicy':: d.fn(help='"Resources resize policy for the container."', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicy(resizePolicy): { resizePolicy: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withResizePolicyMixin':: d.fn(help='"Resources resize policy for the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicyMixin(resizePolicy): { resizePolicy+: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withRestartPolicy':: d.fn(help="\"RestartPolicy defines the restart behavior of individual containers in a pod.\\nThis field may only be set for init containers, and the only allowed value is \\\"Always\\\".\\nFor non-init containers or when this field is not specified,\\nthe restart behavior is defined by the Pod's restart policy and the container type.\\nSetting the RestartPolicy as \\\"Always\\\" for the init container will have the following effect:\\nthis init container will be continually restarted on\\nexit until all regular containers have terminated. Once all regular\\ncontainers have completed, all init containers with restartPolicy \\\"Always\\\"\\nwill be shut down. This lifecycle differs from normal init containers and\\nis often referred to as a \\\"sidecar\\\" container. Although this init\\ncontainer still starts in the init container sequence, it does not wait\\nfor the container to complete before proceeding to the next init\\ncontainer. Instead, the next init container starts immediately after this\\ninit container is started, or after any startupProbe has successfully\\ncompleted.\"", args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, + '#withStdin':: d.fn(help='"Whether this container should allocate a buffer for stdin in the container runtime. If this\\nis not set, reads from stdin in the container will always result in EOF.\\nDefault is false."', args=[d.arg(name='stdin', type=d.T.boolean)]), + withStdin(stdin): { stdin: stdin }, + '#withStdinOnce':: d.fn(help='"Whether the container runtime should close the stdin channel after it has been opened by\\na single attach. When stdin is true the stdin stream will remain open across multiple attach\\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\\nat which time stdin is closed and remains closed until the container is restarted. 
If this\\nflag is false, a container processes that reads from stdin will never receive an EOF.\\nDefault is false"', args=[d.arg(name='stdinOnce', type=d.T.boolean)]), + withStdinOnce(stdinOnce): { stdinOnce: stdinOnce }, + '#withTerminationMessagePath':: d.fn(help="\"Optional: Path at which the file to which the container's termination message\\nwill be written is mounted into the container's filesystem.\\nMessage written is intended to be brief final status, such as an assertion failure message.\\nWill be truncated by the node if greater than 4096 bytes. The total message length across\\nall containers will be limited to 12kb.\\nDefaults to /dev/termination-log.\\nCannot be updated.\"", args=[d.arg(name='terminationMessagePath', type=d.T.string)]), + withTerminationMessagePath(terminationMessagePath): { terminationMessagePath: terminationMessagePath }, + '#withTerminationMessagePolicy':: d.fn(help='"Indicate how the termination message should be populated. File will use the contents of\\nterminationMessagePath to populate the container status message on both success and failure.\\nFallbackToLogsOnError will use the last chunk of container log output if the termination\\nmessage file is empty and the container exited with an error.\\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\\nDefaults to File.\\nCannot be updated."', args=[d.arg(name='terminationMessagePolicy', type=d.T.string)]), + withTerminationMessagePolicy(terminationMessagePolicy): { terminationMessagePolicy: terminationMessagePolicy }, + '#withTty':: d.fn(help="\"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\\nDefault is false.\"", args=[d.arg(name='tty', type=d.T.boolean)]), + withTty(tty): { tty: tty }, + '#withVolumeDevices':: d.fn(help='"volumeDevices is the list of block devices to be used by the container."', args=[d.arg(name='volumeDevices', type=d.T.array)]), + withVolumeDevices(volumeDevices): { volumeDevices: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, + '#withVolumeDevicesMixin':: d.fn(help='"volumeDevices is the list of block devices to be used by the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeDevices', type=d.T.array)]), + withVolumeDevicesMixin(volumeDevices): { volumeDevices+: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, + '#withVolumeMounts':: d.fn(help="\"Pod volumes to mount into the container's filesystem.\\nCannot be updated.\"", args=[d.arg(name='volumeMounts', type=d.T.array)]), + withVolumeMounts(volumeMounts): { volumeMounts: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, + '#withVolumeMountsMixin':: d.fn(help="\"Pod volumes to mount into the container's filesystem.\\nCannot be updated.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeMounts', type=d.T.array)]), + withVolumeMountsMixin(volumeMounts): { volumeMounts+: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, + '#withWorkingDir':: d.fn(help="\"Container's working directory.\\nIf not specified, the container runtime's default will be used, which\\nmight be configured in the container image.\\nCannot be updated.\"", args=[d.arg(name='workingDir', type=d.T.string)]), + withWorkingDir(workingDir): { workingDir: workingDir }, + }, + '#dnsConfig':: d.obj(help='"Specifies the DNS parameters of a pod.\\nParameters specified here will be merged to the generated 
DNS\\nconfiguration based on DNSPolicy."'), + dnsConfig: { + '#options':: d.obj(help='"A list of DNS resolver options.\\nThis will be merged with the base options generated from DNSPolicy.\\nDuplicated entries will be removed. Resolution options given in Options\\nwill override those that appear in the base DNSPolicy."'), + options: { + '#withName':: d.fn(help='"Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withNameservers':: d.fn(help='"A list of DNS name server IP addresses.\\nThis will be appended to the base nameservers generated from DNSPolicy.\\nDuplicated nameservers will be removed."', args=[d.arg(name='nameservers', type=d.T.array)]), + withNameservers(nameservers): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } } }, + '#withNameserversMixin':: d.fn(help='"A list of DNS name server IP addresses.\\nThis will be appended to the base nameservers generated from DNSPolicy.\\nDuplicated nameservers will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nameservers', type=d.T.array)]), + withNameserversMixin(nameservers): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers+: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } } }, + '#withOptions':: d.fn(help='"A list of DNS resolver options.\\nThis will be merged with the base options generated from DNSPolicy.\\nDuplicated entries will be removed. Resolution options given in Options\\nwill override those that appear in the base DNSPolicy."', args=[d.arg(name='options', type=d.T.array)]), + withOptions(options): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options: if std.isArray(v=options) then options else [options] } } } } } } }, + '#withOptionsMixin':: d.fn(help='"A list of DNS resolver options.\\nThis will be merged with the base options generated from DNSPolicy.\\nDuplicated entries will be removed. 
Resolution options given in Options\\nwill override those that appear in the base DNSPolicy."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.array)]), + withOptionsMixin(options): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options+: if std.isArray(v=options) then options else [options] } } } } } } }, + '#withSearches':: d.fn(help='"A list of DNS search domains for host-name lookup.\\nThis will be appended to the base search paths generated from DNSPolicy.\\nDuplicated search paths will be removed."', args=[d.arg(name='searches', type=d.T.array)]), + withSearches(searches): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches: if std.isArray(v=searches) then searches else [searches] } } } } } } }, + '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup.\\nThis will be appended to the base search paths generated from DNSPolicy.\\nDuplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), + withSearchesMixin(searches): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } } } }, + }, + '#ephemeralContainers':: d.obj(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\\npod to perform user-initiated actions such as debugging. This list cannot be specified when\\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\""), + ephemeralContainers: { + '#env':: d.obj(help='"List of environment variables to set in the container.\\nCannot be updated."'), + env: { + '#valueFrom':: d.obj(help="\"Source for the environment variable's value. Cannot be used if value is not empty.\""), + valueFrom: { + '#configMapKeyRef':: d.obj(help='"Selects a key of a ConfigMap."'), + configMapKeyRef: { + '#withKey':: d.fn(help='"The key to select."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { valueFrom+: { configMapKeyRef+: { key: key } } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { valueFrom+: { configMapKeyRef+: { name: name } } }, + '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { valueFrom+: { configMapKeyRef+: { optional: optional } } }, + }, + '#fieldRef':: d.obj(help="\"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`,\\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.\""), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { valueFrom+: { fieldRef+: { apiVersion: apiVersion } } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { valueFrom+: { fieldRef+: { fieldPath: fieldPath } } }, + }, + '#resourceFieldRef':: d.obj(help='"Selects a resource of the container: only resources limits and requests\\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported."'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { valueFrom+: { resourceFieldRef+: { containerName: containerName } } }, + '#withDivisor':: d.fn(help='"Specifies the output format of the exposed resources, defaults to \\"1\\', args=[d.arg(name='divisor', type=d.T.any)]), + withDivisor(divisor): { valueFrom+: { resourceFieldRef+: { divisor: divisor } } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { valueFrom+: { resourceFieldRef+: { resource: resource } } }, + }, + '#secretKeyRef':: d.obj(help="\"Selects a key of a secret in the pod's namespace\""), + secretKeyRef: { + '#withKey':: d.fn(help='"The key of the secret to select from. Must be a valid secret key."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { valueFrom+: { secretKeyRef+: { key: key } } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { valueFrom+: { secretKeyRef+: { name: name } } }, + '#withOptional':: d.fn(help='"Specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { valueFrom+: { secretKeyRef+: { optional: optional } } }, + }, + }, + '#withName':: d.fn(help='"Name of the environment variable. Must be a C_IDENTIFIER."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"Variable references $(VAR_NAME) are expanded\\nusing the previously defined environment variables in the container and\\nany service environment variables. If a variable cannot be resolved,\\nthe reference in the input string will be unchanged. 
Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\\n\\"$$(VAR_NAME)\\" will produce the string literal \\"$(VAR_NAME)\\".\\nEscaped references will never be expanded, regardless of whether the variable\\nexists or not.\\nDefaults to \\"\\"."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#envFrom':: d.obj(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."'), + envFrom: { + '#configMapRef':: d.obj(help='"The ConfigMap to select from"'), + configMapRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMapRef+: { name: name } }, + '#withOptional':: d.fn(help='"Specify whether the ConfigMap must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMapRef+: { optional: optional } }, + }, + '#secretRef':: d.obj(help='"The Secret to select from"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withOptional':: d.fn(help='"Specify whether the Secret must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secretRef+: { optional: optional } }, + }, + '#withPrefix':: d.fn(help='"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER."', args=[d.arg(name='prefix', type=d.T.string)]), + withPrefix(prefix): { prefix: prefix }, + }, + '#lifecycle':: d.obj(help='"Lifecycle is not allowed for ephemeral containers."'), + lifecycle: { + '#postStart':: d.obj(help='"PostStart is called immediately after a container is created. If the handler fails,\\nthe container is terminated and restarted according to its restart policy.\\nOther management of the container blocks until the hook completes.\\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks"'), + postStart: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { lifecycle+: { postStart+: { exec+: { command: if std.isArray(v=command) then command else [command] } } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { lifecycle+: { postStart+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { postStart+: { httpGet+: { host: host } } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { lifecycle+: { postStart+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { lifecycle+: { postStart+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { lifecycle+: { postStart+: { httpGet+: { path: path } } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { postStart+: { httpGet+: { port: port } } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { lifecycle+: { postStart+: { httpGet+: { scheme: scheme } } } }, + }, + '#sleep':: d.obj(help='"Sleep represents the duration that the container should sleep before being terminated."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { postStart+: { sleep+: { seconds: seconds } } } }, + }, + '#tcpSocket':: d.obj(help='"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\\nfor the backward compatibility. There are no validation of this field and\\nlifecycle hooks will fail in runtime when tcp handler is specified."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { postStart+: { tcpSocket+: { host: host } } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { postStart+: { tcpSocket+: { port: port } } } }, + }, + }, + '#preStop':: d.obj(help="\"PreStop is called immediately before a container is terminated due to an\\nAPI request or management event such as liveness/startup probe failure,\\npreemption, resource contention, etc. The handler is not called if the\\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\\nPreStop hook is executed. Regardless of the outcome of the handler, the\\ncontainer will eventually terminate within the Pod's termination grace\\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\\nor until the termination grace period is reached.\\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\""), + preStop: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { lifecycle+: { preStop+: { exec+: { command: if std.isArray(v=command) then command else [command] } } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { lifecycle+: { preStop+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { preStop+: { httpGet+: { host: host } } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { lifecycle+: { preStop+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { lifecycle+: { preStop+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { lifecycle+: { preStop+: { httpGet+: { path: path } } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { preStop+: { httpGet+: { port: port } } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { lifecycle+: { preStop+: { httpGet+: { scheme: scheme } } } }, + }, + '#sleep':: d.obj(help='"Sleep represents the duration that the container should sleep before being terminated."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { preStop+: { sleep+: { seconds: seconds } } } }, + }, + '#tcpSocket':: d.obj(help='"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\\nfor the backward compatibility. There are no validation of this field and\\nlifecycle hooks will fail in runtime when tcp handler is specified."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { preStop+: { tcpSocket+: { host: host } } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { preStop+: { tcpSocket+: { port: port } } } }, + }, + }, + }, + '#livenessProbe':: d.obj(help='"Probes are not allowed for ephemeral containers."'), + livenessProbe: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { livenessProbe+: { exec+: { command: if std.isArray(v=command) then command else [command] } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { livenessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, + }, + '#grpc':: d.obj(help='"GRPC specifies an action involving a GRPC port."'), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { livenessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { livenessProbe+: { grpc+: { service: service } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { livenessProbe+: { httpGet+: { host: host } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { livenessProbe+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { livenessProbe+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { livenessProbe+: { httpGet+: { path: path } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { livenessProbe+: { httpGet+: { port: port } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { livenessProbe+: { httpGet+: { scheme: scheme } } }, + }, + '#tcpSocket':: d.obj(help='"TCPSocket specifies an action involving a TCP port."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { livenessProbe+: { tcpSocket+: { host: host } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { livenessProbe+: { tcpSocket+: { port: port } } }, + }, + '#withFailureThreshold':: d.fn(help='"Minimum consecutive failures for the probe to be considered failed after having succeeded.\\nDefaults to 3. Minimum value is 1."', args=[d.arg(name='failureThreshold', type=d.T.integer)]), + withFailureThreshold(failureThreshold): { livenessProbe+: { failureThreshold: failureThreshold } }, + '#withInitialDelaySeconds':: d.fn(help='"Number of seconds after the container has started before liveness probes are initiated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='initialDelaySeconds', type=d.T.integer)]), + withInitialDelaySeconds(initialDelaySeconds): { livenessProbe+: { initialDelaySeconds: initialDelaySeconds } }, + '#withPeriodSeconds':: d.fn(help='"How often (in seconds) to perform the probe.\\nDefault to 10 seconds. Minimum value is 1."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + withPeriodSeconds(periodSeconds): { livenessProbe+: { periodSeconds: periodSeconds } }, + '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed.\\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), + withSuccessThreshold(successThreshold): { livenessProbe+: { successThreshold: successThreshold } }, + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\\nvalue overrides the value provided by the pod spec.\\nValue must be non-negative integer. 
The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { livenessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, + '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out.\\nDefaults to 1 second. Minimum value is 1.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), + withTimeoutSeconds(timeoutSeconds): { livenessProbe+: { timeoutSeconds: timeoutSeconds } }, + }, + '#ports':: d.obj(help='"Ports are not allowed for ephemeral containers."'), + ports: { + '#withContainerPort':: d.fn(help="\"Number of port to expose on the pod's IP address.\\nThis must be a valid port number, 0 \u003c x \u003c 65536.\"", args=[d.arg(name='containerPort', type=d.T.integer)]), + withContainerPort(containerPort): { containerPort: containerPort }, + '#withHostIP':: d.fn(help='"What host IP to bind the external port to."', args=[d.arg(name='hostIP', type=d.T.string)]), + withHostIP(hostIP): { hostIP: hostIP }, + '#withHostPort':: d.fn(help='"Number of port to expose on the host.\\nIf specified, this must be a valid port number, 0 < x < 65536.\\nIf HostNetwork is specified, this must match ContainerPort.\\nMost containers do not need this."', args=[d.arg(name='hostPort', type=d.T.integer)]), + withHostPort(hostPort): { hostPort: hostPort }, + '#withName':: d.fn(help='"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\\nnamed port in a pod must have a unique name. Name for the port that can be\\nreferred to by services."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withProtocol':: d.fn(help='"Protocol for port. Must be UDP, TCP, or SCTP.\\nDefaults to \\"TCP\\"."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + }, + '#readinessProbe':: d.obj(help='"Probes are not allowed for ephemeral containers."'), + readinessProbe: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { readinessProbe+: { exec+: { command: if std.isArray(v=command) then command else [command] } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { readinessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, + }, + '#grpc':: d.obj(help='"GRPC specifies an action involving a GRPC port."'), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { readinessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { readinessProbe+: { grpc+: { service: service } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { readinessProbe+: { httpGet+: { host: host } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { readinessProbe+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { readinessProbe+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { readinessProbe+: { httpGet+: { path: path } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { readinessProbe+: { httpGet+: { port: port } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { readinessProbe+: { httpGet+: { scheme: scheme } } }, + }, + '#tcpSocket':: d.obj(help='"TCPSocket specifies an action involving a TCP port."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { readinessProbe+: { tcpSocket+: { host: host } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { readinessProbe+: { tcpSocket+: { port: port } } }, + }, + '#withFailureThreshold':: d.fn(help='"Minimum consecutive failures for the probe to be considered failed after having succeeded.\\nDefaults to 3. Minimum value is 1."', args=[d.arg(name='failureThreshold', type=d.T.integer)]), + withFailureThreshold(failureThreshold): { readinessProbe+: { failureThreshold: failureThreshold } }, + '#withInitialDelaySeconds':: d.fn(help='"Number of seconds after the container has started before liveness probes are initiated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='initialDelaySeconds', type=d.T.integer)]), + withInitialDelaySeconds(initialDelaySeconds): { readinessProbe+: { initialDelaySeconds: initialDelaySeconds } }, + '#withPeriodSeconds':: d.fn(help='"How often (in seconds) to perform the probe.\\nDefault to 10 seconds. Minimum value is 1."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + withPeriodSeconds(periodSeconds): { readinessProbe+: { periodSeconds: periodSeconds } }, + '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed.\\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), + withSuccessThreshold(successThreshold): { readinessProbe+: { successThreshold: successThreshold } }, + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\\nvalue overrides the value provided by the pod spec.\\nValue must be non-negative integer. 
The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { readinessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, + '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out.\\nDefaults to 1 second. Minimum value is 1.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), + withTimeoutSeconds(timeoutSeconds): { readinessProbe+: { timeoutSeconds: timeoutSeconds } }, + }, + '#resizePolicy':: d.obj(help='"Resources resize policy for the container."'), + resizePolicy: { + '#withResourceName':: d.fn(help='"Name of the resource to which this resource resize policy applies.\\nSupported values: cpu, memory."', args=[d.arg(name='resourceName', type=d.T.string)]), + withResourceName(resourceName): { resourceName: resourceName }, + '#withRestartPolicy':: d.fn(help='"Restart policy to apply when specified resource is resized.\\nIf not specified, it defaults to NotRequired."', args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, + }, + '#resources':: d.obj(help='"Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources\\nalready allocated to the pod."'), + resources: { + '#claims':: d.obj(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. It can only be set for containers."'), + claims: { + '#withName':: d.fn(help='"Name must match the name of one entry in pod.spec.resourceClaims of\\nthe Pod where this field is used. It makes that resource available\\ninside a container."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. 
It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } }, + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { resources+: { limits: limits } }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { resources+: { limits+: limits } }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { resources+: { requests: requests } }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { resources+: { requests+: requests } }, + }, + '#securityContext':: d.obj(help='"Optional: SecurityContext defines the security options the ephemeral container should be run with.\\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext."'), + securityContext: { + '#appArmorProfile':: d.obj(help="\"appArmorProfile is the AppArmor options to use by this container. 
If set, this profile\\noverrides the pod's appArmorProfile.\\nNote that this field cannot be set when spec.os.name is windows.\""), + appArmorProfile: { + '#withLocalhostProfile':: d.fn(help='"localhostProfile indicates a profile loaded on the node that should be used.\\nThe profile must be preconfigured on the node to work.\\nMust match the loaded name of the profile.\\nMust be set if and only if type is \\"Localhost\\"."', args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { securityContext+: { appArmorProfile+: { localhostProfile: localhostProfile } } }, + '#withType':: d.fn(help="\"type indicates which kind of AppArmor profile will be applied.\\nValid options are:\\n Localhost - a profile pre-loaded on the node.\\n RuntimeDefault - the container runtime's default profile.\\n Unconfined - no AppArmor enforcement.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { securityContext+: { appArmorProfile+: { type: type } } }, + }, + '#capabilities':: d.obj(help='"The capabilities to add/drop when running containers.\\nDefaults to the default set of capabilities granted by the container runtime.\\nNote that this field cannot be set when spec.os.name is windows."'), + capabilities: { + '#withAdd':: d.fn(help='"Added capabilities"', args=[d.arg(name='add', type=d.T.array)]), + withAdd(add): { securityContext+: { capabilities+: { add: if std.isArray(v=add) then add else [add] } } }, + '#withAddMixin':: d.fn(help='"Added capabilities"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='add', type=d.T.array)]), + withAddMixin(add): { securityContext+: { capabilities+: { add+: if std.isArray(v=add) then add else [add] } } }, + '#withDrop':: d.fn(help='"Removed capabilities"', args=[d.arg(name='drop', type=d.T.array)]), + withDrop(drop): { securityContext+: { capabilities+: { drop: if std.isArray(v=drop) then drop else [drop] } } }, + '#withDropMixin':: d.fn(help='"Removed capabilities"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='drop', type=d.T.array)]), + withDropMixin(drop): { securityContext+: { capabilities+: { drop+: if std.isArray(v=drop) then drop else [drop] } } }, + }, + '#seLinuxOptions':: d.obj(help='"The SELinux context to be applied to the container.\\nIf unspecified, the container runtime will allocate a random SELinux context for each\\ncontainer. May also be set in PodSecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."'), + seLinuxOptions: { + '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), + withLevel(level): { securityContext+: { seLinuxOptions+: { level: level } } }, + '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), + withRole(role): { securityContext+: { seLinuxOptions+: { role: role } } }, + '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { securityContext+: { seLinuxOptions+: { type: type } } }, + '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { securityContext+: { seLinuxOptions+: { user: user } } }, + }, + '#seccompProfile':: d.obj(help='"The seccomp options to use by this container. If seccomp options are\\nprovided at both the pod & container level, the container options\\noverride the pod options.\\nNote that this field cannot be set when spec.os.name is windows."'), + seccompProfile: { + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used.\\nThe profile must be preconfigured on the node to work.\\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\\nMust be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } }, + '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied.\\nValid options are:\\n\\n\\nLocalhost - a profile defined in a file on the node should be used.\\nRuntimeDefault - the container runtime default profile should be used.\\nUnconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { securityContext+: { seccompProfile+: { type: type } } }, + }, + '#windowsOptions':: d.obj(help='"The Windows specific settings applied to all containers.\\nIf unspecified, the options from the PodSecurityContext will be used.\\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is linux."'), + windowsOptions: { + '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook\\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\\nGMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), + withGmsaCredentialSpec(gmsaCredentialSpec): { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } }, + '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), + withGmsaCredentialSpecName(gmsaCredentialSpecName): { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be 
run as a 'Host Process' container.\\nAll of a Pod's containers must have the same effective HostProcess value\\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\\nIn addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } }, + '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process.\\nDefaults to the user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), + withRunAsUserName(runAsUserName): { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } }, + }, + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more\\nprivileges than its parent process. This bool directly controls if\\nthe no_new_privs flag will be set on the container process.\\nAllowPrivilegeEscalation is true always when the container is:\\n1) run as Privileged\\n2) has CAP_SYS_ADMIN\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + withAllowPrivilegeEscalation(allowPrivilegeEscalation): { securityContext+: { allowPrivilegeEscalation: allowPrivilegeEscalation } }, + '#withPrivileged':: d.fn(help='"Run container in privileged mode.\\nProcesses in privileged containers are essentially equivalent to root on the host.\\nDefaults to false.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), + withPrivileged(privileged): { securityContext+: { privileged: privileged } }, + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers.\\nThe default is DefaultProcMount which uses the container runtime defaults for\\nreadonly paths and masked paths.\\nThis requires the ProcMountType feature flag to be enabled.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), + withProcMount(procMount): { securityContext+: { procMount: procMount } }, + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem.\\nDefault is false.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + withReadOnlyRootFilesystem(readOnlyRootFilesystem): { securityContext+: { readOnlyRootFilesystem: readOnlyRootFilesystem } }, + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + withRunAsGroup(runAsGroup): { securityContext+: { runAsGroup: runAsGroup } }, + '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user.\\nIf true, the Kubelet will validate the image at runtime to ensure that it\\ndoes not run as UID 0 (root) and fail to start the container if it does.\\nIf unset or false, no such validation will be performed.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), + withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } }, + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + withRunAsUser(runAsUser): { securityContext+: { runAsUser: runAsUser } }, + }, + '#startupProbe':: d.obj(help='"Probes are not allowed for ephemeral containers."'), + startupProbe: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { startupProbe+: { exec+: { command: if std.isArray(v=command) then command else [command] } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { startupProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, + }, + '#grpc':: d.obj(help='"GRPC specifies an action involving a GRPC port."'), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. 
Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { startupProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { startupProbe+: { grpc+: { service: service } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { startupProbe+: { httpGet+: { host: host } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { startupProbe+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { startupProbe+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { startupProbe+: { httpGet+: { path: path } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { startupProbe+: { httpGet+: { port: port } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { startupProbe+: { httpGet+: { scheme: scheme } } }, + }, + '#tcpSocket':: d.obj(help='"TCPSocket specifies an action involving a TCP port."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { startupProbe+: { tcpSocket+: { host: host } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { startupProbe+: { tcpSocket+: { port: port } } }, + }, + '#withFailureThreshold':: d.fn(help='"Minimum consecutive failures for the probe to be considered failed after having succeeded.\\nDefaults to 3. 
Minimum value is 1."', args=[d.arg(name='failureThreshold', type=d.T.integer)]), + withFailureThreshold(failureThreshold): { startupProbe+: { failureThreshold: failureThreshold } }, + '#withInitialDelaySeconds':: d.fn(help='"Number of seconds after the container has started before liveness probes are initiated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='initialDelaySeconds', type=d.T.integer)]), + withInitialDelaySeconds(initialDelaySeconds): { startupProbe+: { initialDelaySeconds: initialDelaySeconds } }, + '#withPeriodSeconds':: d.fn(help='"How often (in seconds) to perform the probe.\\nDefault to 10 seconds. Minimum value is 1."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + withPeriodSeconds(periodSeconds): { startupProbe+: { periodSeconds: periodSeconds } }, + '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed.\\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), + withSuccessThreshold(successThreshold): { startupProbe+: { successThreshold: successThreshold } }, + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\\nvalue overrides the value provided by the pod spec.\\nValue must be non-negative integer. The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { startupProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, + '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out.\\nDefaults to 1 second. Minimum value is 1.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), + withTimeoutSeconds(timeoutSeconds): { startupProbe+: { timeoutSeconds: timeoutSeconds } }, + }, + '#volumeDevices':: d.obj(help='"volumeDevices is the list of block devices to be used by the container."'), + volumeDevices: { + '#withDevicePath':: d.fn(help='"devicePath is the path inside of the container that the device will be mapped to."', args=[d.arg(name='devicePath', type=d.T.string)]), + withDevicePath(devicePath): { devicePath: devicePath }, + '#withName':: d.fn(help='"name must match the name of a persistentVolumeClaim in the pod"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#volumeMounts':: d.obj(help="\"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.\\nCannot be updated.\""), + volumeMounts: { + '#withMountPath':: d.fn(help="\"Path within the container at which the volume should be mounted. 
Must\\nnot contain ':'.\"", args=[d.arg(name='mountPath', type=d.T.string)]), + withMountPath(mountPath): { mountPath: mountPath }, + '#withMountPropagation':: d.fn(help='"mountPropagation determines how mounts are propagated from the host\\nto container and the other way around.\\nWhen not set, MountPropagationNone is used.\\nThis field is beta in 1.10.\\nWhen RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified\\n(which defaults to None)."', args=[d.arg(name='mountPropagation', type=d.T.string)]), + withMountPropagation(mountPropagation): { mountPropagation: mountPropagation }, + '#withName':: d.fn(help='"This must match the Name of a Volume."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withReadOnly':: d.fn(help='"Mounted read-only if true, read-write otherwise (false or unspecified).\\nDefaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withRecursiveReadOnly':: d.fn(help='"RecursiveReadOnly specifies whether read-only mounts should be handled\\nrecursively.\\n\\n\\nIf ReadOnly is false, this field has no meaning and must be unspecified.\\n\\n\\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made\\nrecursively read-only. If this field is set to IfPossible, the mount is made\\nrecursively read-only, if it is supported by the container runtime. If this\\nfield is set to Enabled, the mount is made recursively read-only if it is\\nsupported by the container runtime, otherwise the pod will not be started and\\nan error will be generated to indicate the reason.\\n\\n\\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to\\nNone (or be unspecified, which defaults to None).\\n\\n\\nIf this field is not specified, it is treated as an equivalent of Disabled."', args=[d.arg(name='recursiveReadOnly', type=d.T.string)]), + withRecursiveReadOnly(recursiveReadOnly): { recursiveReadOnly: recursiveReadOnly }, + '#withSubPath':: d.fn(help="\"Path within the volume from which the container's volume should be mounted.\\nDefaults to \\\"\\\" (volume's root).\"", args=[d.arg(name='subPath', type=d.T.string)]), + withSubPath(subPath): { subPath: subPath }, + '#withSubPathExpr':: d.fn(help="\"Expanded path within the volume from which the container's volume should be mounted.\\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\\nDefaults to \\\"\\\" (volume's root).\\nSubPathExpr and SubPath are mutually exclusive.\"", args=[d.arg(name='subPathExpr', type=d.T.string)]), + withSubPathExpr(subPathExpr): { subPathExpr: subPathExpr }, + }, + '#withArgs':: d.fn(help="\"Arguments to the entrypoint.\\nThe image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. 
Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), + withArgs(args): { args: if std.isArray(v=args) then args else [args] }, + '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint.\\nThe image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), + withArgsMixin(args): { args+: if std.isArray(v=args) then args else [args] }, + '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell.\\nThe image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { command: if std.isArray(v=command) then command else [command] }, + '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell.\\nThe image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { command+: if std.isArray(v=command) then command else [command] }, + '#withEnv':: d.fn(help='"List of environment variables to set in the container.\\nCannot be updated."', args=[d.arg(name='env', type=d.T.array)]), + withEnv(env): { env: if std.isArray(v=env) then env else [env] }, + '#withEnvFrom':: d.fn(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. 
When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."', args=[d.arg(name='envFrom', type=d.T.array)]), + withEnvFrom(envFrom): { envFrom: if std.isArray(v=envFrom) then envFrom else [envFrom] }, + '#withEnvFromMixin':: d.fn(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='envFrom', type=d.T.array)]), + withEnvFromMixin(envFrom): { envFrom+: if std.isArray(v=envFrom) then envFrom else [envFrom] }, + '#withEnvMixin':: d.fn(help='"List of environment variables to set in the container.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='env', type=d.T.array)]), + withEnvMixin(env): { env+: if std.isArray(v=env) then env else [env] }, + '#withImage':: d.fn(help='"Container image name.\\nMore info: https://kubernetes.io/docs/concepts/containers/images"', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { image: image }, + '#withImagePullPolicy':: d.fn(help='"Image pull policy.\\nOne of Always, Never, IfNotPresent.\\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images"', args=[d.arg(name='imagePullPolicy', type=d.T.string)]), + withImagePullPolicy(imagePullPolicy): { imagePullPolicy: imagePullPolicy }, + '#withName':: d.fn(help='"Name of the ephemeral container specified as a DNS_LABEL.\\nThis name must be unique among all containers, init containers and ephemeral containers."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPorts':: d.fn(help='"Ports are not allowed for ephemeral containers."', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, + '#withPortsMixin':: d.fn(help='"Ports are not allowed for ephemeral containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#withResizePolicy':: d.fn(help='"Resources resize policy for the container."', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicy(resizePolicy): { resizePolicy: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withResizePolicyMixin':: d.fn(help='"Resources resize policy for the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicyMixin(resizePolicy): { resizePolicy+: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withRestartPolicy':: d.fn(help='"Restart policy for the container to manage the restart behavior of each\\ncontainer within a pod.\\nThis may only be set for init containers. 
You cannot set this field on\\nephemeral containers."', args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, + '#withStdin':: d.fn(help='"Whether this container should allocate a buffer for stdin in the container runtime. If this\\nis not set, reads from stdin in the container will always result in EOF.\\nDefault is false."', args=[d.arg(name='stdin', type=d.T.boolean)]), + withStdin(stdin): { stdin: stdin }, + '#withStdinOnce':: d.fn(help='"Whether the container runtime should close the stdin channel after it has been opened by\\na single attach. When stdin is true the stdin stream will remain open across multiple attach\\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\\nat which time stdin is closed and remains closed until the container is restarted. If this\\nflag is false, a container processes that reads from stdin will never receive an EOF.\\nDefault is false"', args=[d.arg(name='stdinOnce', type=d.T.boolean)]), + withStdinOnce(stdinOnce): { stdinOnce: stdinOnce }, + '#withTargetContainerName':: d.fn(help='"If set, the name of the container from PodSpec that this ephemeral container targets.\\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\\nIf not set then the ephemeral container uses the namespaces configured in the Pod spec.\\n\\n\\nThe container runtime must implement support for this feature. If the runtime does not\\nsupport namespace targeting then the result of setting this field is undefined."', args=[d.arg(name='targetContainerName', type=d.T.string)]), + withTargetContainerName(targetContainerName): { targetContainerName: targetContainerName }, + '#withTerminationMessagePath':: d.fn(help="\"Optional: Path at which the file to which the container's termination message\\nwill be written is mounted into the container's filesystem.\\nMessage written is intended to be brief final status, such as an assertion failure message.\\nWill be truncated by the node if greater than 4096 bytes. The total message length across\\nall containers will be limited to 12kb.\\nDefaults to /dev/termination-log.\\nCannot be updated.\"", args=[d.arg(name='terminationMessagePath', type=d.T.string)]), + withTerminationMessagePath(terminationMessagePath): { terminationMessagePath: terminationMessagePath }, + '#withTerminationMessagePolicy':: d.fn(help='"Indicate how the termination message should be populated. 
File will use the contents of\\nterminationMessagePath to populate the container status message on both success and failure.\\nFallbackToLogsOnError will use the last chunk of container log output if the termination\\nmessage file is empty and the container exited with an error.\\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\\nDefaults to File.\\nCannot be updated."', args=[d.arg(name='terminationMessagePolicy', type=d.T.string)]), + withTerminationMessagePolicy(terminationMessagePolicy): { terminationMessagePolicy: terminationMessagePolicy }, + '#withTty':: d.fn(help="\"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\\nDefault is false.\"", args=[d.arg(name='tty', type=d.T.boolean)]), + withTty(tty): { tty: tty }, + '#withVolumeDevices':: d.fn(help='"volumeDevices is the list of block devices to be used by the container."', args=[d.arg(name='volumeDevices', type=d.T.array)]), + withVolumeDevices(volumeDevices): { volumeDevices: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, + '#withVolumeDevicesMixin':: d.fn(help='"volumeDevices is the list of block devices to be used by the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeDevices', type=d.T.array)]), + withVolumeDevicesMixin(volumeDevices): { volumeDevices+: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, + '#withVolumeMounts':: d.fn(help="\"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.\\nCannot be updated.\"", args=[d.arg(name='volumeMounts', type=d.T.array)]), + withVolumeMounts(volumeMounts): { volumeMounts: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, + '#withVolumeMountsMixin':: d.fn(help="\"Pod volumes to mount into the container's filesystem. 
Subpath mounts are not allowed for ephemeral containers.\\nCannot be updated.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeMounts', type=d.T.array)]), + withVolumeMountsMixin(volumeMounts): { volumeMounts+: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, + '#withWorkingDir':: d.fn(help="\"Container's working directory.\\nIf not specified, the container runtime's default will be used, which\\nmight be configured in the container image.\\nCannot be updated.\"", args=[d.arg(name='workingDir', type=d.T.string)]), + withWorkingDir(workingDir): { workingDir: workingDir }, + }, + '#hostAliases':: d.obj(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts\\nfile if specified.\""), + hostAliases: { + '#withHostnames':: d.fn(help='"Hostnames for the above IP address."', args=[d.arg(name='hostnames', type=d.T.array)]), + withHostnames(hostnames): { hostnames: if std.isArray(v=hostnames) then hostnames else [hostnames] }, + '#withHostnamesMixin':: d.fn(help='"Hostnames for the above IP address."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hostnames', type=d.T.array)]), + withHostnamesMixin(hostnames): { hostnames+: if std.isArray(v=hostnames) then hostnames else [hostnames] }, + '#withIp':: d.fn(help='"IP address of the host file entry."', args=[d.arg(name='ip', type=d.T.string)]), + withIp(ip): { ip: ip }, + }, + '#imagePullSecrets':: d.obj(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\\nIf specified, these secrets will be passed to individual puller implementations for them to use.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"'), + imagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#initContainers':: d.obj(help='"List of initialization containers belonging to the pod.\\nInit containers are executed in order prior to containers being started. If any\\ninit container fails, the pod is considered to have failed and is handled according\\nto its restartPolicy. The name for an init container or normal container must be\\nunique among all containers.\\nInit containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.\\nThe resourceRequirements of an init container are taken into account during scheduling\\nby finding the highest request/limit for each resource type, and then using the max of\\nof that value or the sum of the normal containers. Limits are applied to init containers\\nin a similar fashion.\\nInit containers cannot currently be added or removed.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"'), + initContainers: { + '#env':: d.obj(help='"List of environment variables to set in the container.\\nCannot be updated."'), + env: { + '#valueFrom':: d.obj(help="\"Source for the environment variable's value. 
Cannot be used if value is not empty.\""), + valueFrom: { + '#configMapKeyRef':: d.obj(help='"Selects a key of a ConfigMap."'), + configMapKeyRef: { + '#withKey':: d.fn(help='"The key to select."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { valueFrom+: { configMapKeyRef+: { key: key } } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { valueFrom+: { configMapKeyRef+: { name: name } } }, + '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { valueFrom+: { configMapKeyRef+: { optional: optional } } }, + }, + '#fieldRef':: d.obj(help="\"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`,\\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.\""), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { valueFrom+: { fieldRef+: { apiVersion: apiVersion } } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { valueFrom+: { fieldRef+: { fieldPath: fieldPath } } }, + }, + '#resourceFieldRef':: d.obj(help='"Selects a resource of the container: only resources limits and requests\\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported."'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { valueFrom+: { resourceFieldRef+: { containerName: containerName } } }, + '#withDivisor':: d.fn(help='"Specifies the output format of the exposed resources, defaults to \\"1\\', args=[d.arg(name='divisor', type=d.T.any)]), + withDivisor(divisor): { valueFrom+: { resourceFieldRef+: { divisor: divisor } } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { valueFrom+: { resourceFieldRef+: { resource: resource } } }, + }, + '#secretKeyRef':: d.obj(help="\"Selects a key of a secret in the pod's namespace\""), + secretKeyRef: { + '#withKey':: d.fn(help='"The key of the secret to select from. Must be a valid secret key."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { valueFrom+: { secretKeyRef+: { key: key } } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { valueFrom+: { secretKeyRef+: { name: name } } }, + '#withOptional':: d.fn(help='"Specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { valueFrom+: { secretKeyRef+: { optional: optional } } }, + }, + }, + '#withName':: d.fn(help='"Name of the environment variable. 
Must be a C_IDENTIFIER."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"Variable references $(VAR_NAME) are expanded\\nusing the previously defined environment variables in the container and\\nany service environment variables. If a variable cannot be resolved,\\nthe reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\\n\\"$$(VAR_NAME)\\" will produce the string literal \\"$(VAR_NAME)\\".\\nEscaped references will never be expanded, regardless of whether the variable\\nexists or not.\\nDefaults to \\"\\"."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#envFrom':: d.obj(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."'), + envFrom: { + '#configMapRef':: d.obj(help='"The ConfigMap to select from"'), + configMapRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMapRef+: { name: name } }, + '#withOptional':: d.fn(help='"Specify whether the ConfigMap must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMapRef+: { optional: optional } }, + }, + '#secretRef':: d.obj(help='"The Secret to select from"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withOptional':: d.fn(help='"Specify whether the Secret must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secretRef+: { optional: optional } }, + }, + '#withPrefix':: d.fn(help='"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER."', args=[d.arg(name='prefix', type=d.T.string)]), + withPrefix(prefix): { prefix: prefix }, + }, + '#lifecycle':: d.obj(help='"Actions that the management system should take in response to container lifecycle events.\\nCannot be updated."'), + lifecycle: { + '#postStart':: d.obj(help='"PostStart is called immediately after a container is created. If the handler fails,\\nthe container is terminated and restarted according to its restart policy.\\nOther management of the container blocks until the hook completes.\\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks"'), + postStart: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { lifecycle+: { postStart+: { exec+: { command: if std.isArray(v=command) then command else [command] } } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { lifecycle+: { postStart+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { postStart+: { httpGet+: { host: host } } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { lifecycle+: { postStart+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { lifecycle+: { postStart+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { lifecycle+: { postStart+: { httpGet+: { path: path } } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { postStart+: { httpGet+: { port: port } } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { lifecycle+: { postStart+: { httpGet+: { scheme: scheme } } } }, + }, + '#sleep':: d.obj(help='"Sleep represents the duration that the container should sleep before being terminated."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { postStart+: { sleep+: { seconds: seconds } } } }, + }, + '#tcpSocket':: d.obj(help='"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\\nfor the backward compatibility. There are no validation of this field and\\nlifecycle hooks will fail in runtime when tcp handler is specified."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { postStart+: { tcpSocket+: { host: host } } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { postStart+: { tcpSocket+: { port: port } } } }, + }, + }, + '#preStop':: d.obj(help="\"PreStop is called immediately before a container is terminated due to an\\nAPI request or management event such as liveness/startup probe failure,\\npreemption, resource contention, etc. The handler is not called if the\\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\\nPreStop hook is executed. Regardless of the outcome of the handler, the\\ncontainer will eventually terminate within the Pod's termination grace\\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\\nor until the termination grace period is reached.\\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\""), + preStop: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { lifecycle+: { preStop+: { exec+: { command: if std.isArray(v=command) then command else [command] } } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { lifecycle+: { preStop+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { preStop+: { httpGet+: { host: host } } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { lifecycle+: { preStop+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { lifecycle+: { preStop+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { lifecycle+: { preStop+: { httpGet+: { path: path } } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { preStop+: { httpGet+: { port: port } } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { lifecycle+: { preStop+: { httpGet+: { scheme: scheme } } } }, + }, + '#sleep':: d.obj(help='"Sleep represents the duration that the container should sleep before being terminated."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { preStop+: { sleep+: { seconds: seconds } } } }, + }, + '#tcpSocket':: d.obj(help='"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\\nfor the backward compatibility. There are no validation of this field and\\nlifecycle hooks will fail in runtime when tcp handler is specified."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { lifecycle+: { preStop+: { tcpSocket+: { host: host } } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { lifecycle+: { preStop+: { tcpSocket+: { port: port } } } }, + }, + }, + }, + '#livenessProbe':: d.obj(help='"Periodic probe of container liveness.\\nContainer will be restarted if the probe fails.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"'), + livenessProbe: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { livenessProbe+: { exec+: { command: if std.isArray(v=command) then command else [command] } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { livenessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, + }, + '#grpc':: d.obj(help='"GRPC specifies an action involving a GRPC port."'), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { livenessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { livenessProbe+: { grpc+: { service: service } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { livenessProbe+: { httpGet+: { host: host } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { livenessProbe+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { livenessProbe+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { livenessProbe+: { httpGet+: { path: path } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { livenessProbe+: { httpGet+: { port: port } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { livenessProbe+: { httpGet+: { scheme: scheme } } }, + }, + '#tcpSocket':: d.obj(help='"TCPSocket specifies an action involving a TCP port."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { livenessProbe+: { tcpSocket+: { host: host } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { livenessProbe+: { tcpSocket+: { port: port } } }, + }, + '#withFailureThreshold':: d.fn(help='"Minimum consecutive failures for the probe to be considered failed after having succeeded.\\nDefaults to 3. Minimum value is 1."', args=[d.arg(name='failureThreshold', type=d.T.integer)]), + withFailureThreshold(failureThreshold): { livenessProbe+: { failureThreshold: failureThreshold } }, + '#withInitialDelaySeconds':: d.fn(help='"Number of seconds after the container has started before liveness probes are initiated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='initialDelaySeconds', type=d.T.integer)]), + withInitialDelaySeconds(initialDelaySeconds): { livenessProbe+: { initialDelaySeconds: initialDelaySeconds } }, + '#withPeriodSeconds':: d.fn(help='"How often (in seconds) to perform the probe.\\nDefault to 10 seconds. Minimum value is 1."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + withPeriodSeconds(periodSeconds): { livenessProbe+: { periodSeconds: periodSeconds } }, + '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed.\\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), + withSuccessThreshold(successThreshold): { livenessProbe+: { successThreshold: successThreshold } }, + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\\nvalue overrides the value provided by the pod spec.\\nValue must be non-negative integer. 
The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { livenessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, + '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out.\\nDefaults to 1 second. Minimum value is 1.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), + withTimeoutSeconds(timeoutSeconds): { livenessProbe+: { timeoutSeconds: timeoutSeconds } }, + }, + '#ports':: d.obj(help='"List of ports to expose from the container. Not specifying a port here\\nDOES NOT prevent that port from being exposed. Any port which is\\nlistening on the default \\"0.0.0.0\\" address inside a container will be\\naccessible from the network.\\nModifying this array with strategic merge patch may corrupt the data.\\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\\nCannot be updated."'), + ports: { + '#withContainerPort':: d.fn(help="\"Number of port to expose on the pod's IP address.\\nThis must be a valid port number, 0 \u003c x \u003c 65536.\"", args=[d.arg(name='containerPort', type=d.T.integer)]), + withContainerPort(containerPort): { containerPort: containerPort }, + '#withHostIP':: d.fn(help='"What host IP to bind the external port to."', args=[d.arg(name='hostIP', type=d.T.string)]), + withHostIP(hostIP): { hostIP: hostIP }, + '#withHostPort':: d.fn(help='"Number of port to expose on the host.\\nIf specified, this must be a valid port number, 0 < x < 65536.\\nIf HostNetwork is specified, this must match ContainerPort.\\nMost containers do not need this."', args=[d.arg(name='hostPort', type=d.T.integer)]), + withHostPort(hostPort): { hostPort: hostPort }, + '#withName':: d.fn(help='"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\\nnamed port in a pod must have a unique name. Name for the port that can be\\nreferred to by services."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withProtocol':: d.fn(help='"Protocol for port. Must be UDP, TCP, or SCTP.\\nDefaults to \\"TCP\\"."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + }, + '#readinessProbe':: d.obj(help='"Periodic probe of container service readiness.\\nContainer will be removed from service endpoints if the probe fails.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"'), + readinessProbe: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { readinessProbe+: { exec+: { command: if std.isArray(v=command) then command else [command] } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { readinessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, + }, + '#grpc':: d.obj(help='"GRPC specifies an action involving a GRPC port."'), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { readinessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { readinessProbe+: { grpc+: { service: service } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { readinessProbe+: { httpGet+: { host: host } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { readinessProbe+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. 
HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { readinessProbe+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { readinessProbe+: { httpGet+: { path: path } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { readinessProbe+: { httpGet+: { port: port } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { readinessProbe+: { httpGet+: { scheme: scheme } } }, + }, + '#tcpSocket':: d.obj(help='"TCPSocket specifies an action involving a TCP port."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { readinessProbe+: { tcpSocket+: { host: host } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { readinessProbe+: { tcpSocket+: { port: port } } }, + }, + '#withFailureThreshold':: d.fn(help='"Minimum consecutive failures for the probe to be considered failed after having succeeded.\\nDefaults to 3. Minimum value is 1."', args=[d.arg(name='failureThreshold', type=d.T.integer)]), + withFailureThreshold(failureThreshold): { readinessProbe+: { failureThreshold: failureThreshold } }, + '#withInitialDelaySeconds':: d.fn(help='"Number of seconds after the container has started before liveness probes are initiated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='initialDelaySeconds', type=d.T.integer)]), + withInitialDelaySeconds(initialDelaySeconds): { readinessProbe+: { initialDelaySeconds: initialDelaySeconds } }, + '#withPeriodSeconds':: d.fn(help='"How often (in seconds) to perform the probe.\\nDefault to 10 seconds. Minimum value is 1."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + withPeriodSeconds(periodSeconds): { readinessProbe+: { periodSeconds: periodSeconds } }, + '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed.\\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), + withSuccessThreshold(successThreshold): { readinessProbe+: { successThreshold: successThreshold } }, + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\\nvalue overrides the value provided by the pod spec.\\nValue must be non-negative integer. 
The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { readinessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, + '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out.\\nDefaults to 1 second. Minimum value is 1.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), + withTimeoutSeconds(timeoutSeconds): { readinessProbe+: { timeoutSeconds: timeoutSeconds } }, + }, + '#resizePolicy':: d.obj(help='"Resources resize policy for the container."'), + resizePolicy: { + '#withResourceName':: d.fn(help='"Name of the resource to which this resource resize policy applies.\\nSupported values: cpu, memory."', args=[d.arg(name='resourceName', type=d.T.string)]), + withResourceName(resourceName): { resourceName: resourceName }, + '#withRestartPolicy':: d.fn(help='"Restart policy to apply when specified resource is resized.\\nIf not specified, it defaults to NotRequired."', args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, + }, + '#resources':: d.obj(help='"Compute Resources required by this container.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"'), + resources: { + '#claims':: d.obj(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. It can only be set for containers."'), + claims: { + '#withName':: d.fn(help='"Name must match the name of one entry in pod.spec.resourceClaims of\\nthe Pod where this field is used. It makes that resource available\\ninside a container."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable. 
It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } }, + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { resources+: { limits: limits } }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { resources+: { limits+: limits } }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { resources+: { requests: requests } }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { resources+: { requests+: requests } }, + }, + '#securityContext':: d.obj(help='"SecurityContext defines the security options the container should be run with.\\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/"'), + securityContext: { + '#appArmorProfile':: d.obj(help="\"appArmorProfile is the AppArmor options to use by this container. 
If set, this profile\\noverrides the pod's appArmorProfile.\\nNote that this field cannot be set when spec.os.name is windows.\""), + appArmorProfile: { + '#withLocalhostProfile':: d.fn(help='"localhostProfile indicates a profile loaded on the node that should be used.\\nThe profile must be preconfigured on the node to work.\\nMust match the loaded name of the profile.\\nMust be set if and only if type is \\"Localhost\\"."', args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { securityContext+: { appArmorProfile+: { localhostProfile: localhostProfile } } }, + '#withType':: d.fn(help="\"type indicates which kind of AppArmor profile will be applied.\\nValid options are:\\n Localhost - a profile pre-loaded on the node.\\n RuntimeDefault - the container runtime's default profile.\\n Unconfined - no AppArmor enforcement.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { securityContext+: { appArmorProfile+: { type: type } } }, + }, + '#capabilities':: d.obj(help='"The capabilities to add/drop when running containers.\\nDefaults to the default set of capabilities granted by the container runtime.\\nNote that this field cannot be set when spec.os.name is windows."'), + capabilities: { + '#withAdd':: d.fn(help='"Added capabilities"', args=[d.arg(name='add', type=d.T.array)]), + withAdd(add): { securityContext+: { capabilities+: { add: if std.isArray(v=add) then add else [add] } } }, + '#withAddMixin':: d.fn(help='"Added capabilities"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='add', type=d.T.array)]), + withAddMixin(add): { securityContext+: { capabilities+: { add+: if std.isArray(v=add) then add else [add] } } }, + '#withDrop':: d.fn(help='"Removed capabilities"', args=[d.arg(name='drop', type=d.T.array)]), + withDrop(drop): { securityContext+: { capabilities+: { drop: if std.isArray(v=drop) then drop else [drop] } } }, + '#withDropMixin':: d.fn(help='"Removed capabilities"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='drop', type=d.T.array)]), + withDropMixin(drop): { securityContext+: { capabilities+: { drop+: if std.isArray(v=drop) then drop else [drop] } } }, + }, + '#seLinuxOptions':: d.obj(help='"The SELinux context to be applied to the container.\\nIf unspecified, the container runtime will allocate a random SELinux context for each\\ncontainer. May also be set in PodSecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."'), + seLinuxOptions: { + '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), + withLevel(level): { securityContext+: { seLinuxOptions+: { level: level } } }, + '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), + withRole(role): { securityContext+: { seLinuxOptions+: { role: role } } }, + '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { securityContext+: { seLinuxOptions+: { type: type } } }, + '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { securityContext+: { seLinuxOptions+: { user: user } } }, + }, + '#seccompProfile':: d.obj(help='"The seccomp options to use by this container. If seccomp options are\\nprovided at both the pod & container level, the container options\\noverride the pod options.\\nNote that this field cannot be set when spec.os.name is windows."'), + seccompProfile: { + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used.\\nThe profile must be preconfigured on the node to work.\\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\\nMust be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } }, + '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied.\\nValid options are:\\n\\n\\nLocalhost - a profile defined in a file on the node should be used.\\nRuntimeDefault - the container runtime default profile should be used.\\nUnconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { securityContext+: { seccompProfile+: { type: type } } }, + }, + '#windowsOptions':: d.obj(help='"The Windows specific settings applied to all containers.\\nIf unspecified, the options from the PodSecurityContext will be used.\\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is linux."'), + windowsOptions: { + '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook\\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\\nGMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), + withGmsaCredentialSpec(gmsaCredentialSpec): { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } }, + '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), + withGmsaCredentialSpecName(gmsaCredentialSpecName): { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be 
run as a 'Host Process' container.\\nAll of a Pod's containers must have the same effective HostProcess value\\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\\nIn addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } }, + '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process.\\nDefaults to the user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), + withRunAsUserName(runAsUserName): { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } }, + }, + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more\\nprivileges than its parent process. This bool directly controls if\\nthe no_new_privs flag will be set on the container process.\\nAllowPrivilegeEscalation is true always when the container is:\\n1) run as Privileged\\n2) has CAP_SYS_ADMIN\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + withAllowPrivilegeEscalation(allowPrivilegeEscalation): { securityContext+: { allowPrivilegeEscalation: allowPrivilegeEscalation } }, + '#withPrivileged':: d.fn(help='"Run container in privileged mode.\\nProcesses in privileged containers are essentially equivalent to root on the host.\\nDefaults to false.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), + withPrivileged(privileged): { securityContext+: { privileged: privileged } }, + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers.\\nThe default is DefaultProcMount which uses the container runtime defaults for\\nreadonly paths and masked paths.\\nThis requires the ProcMountType feature flag to be enabled.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), + withProcMount(procMount): { securityContext+: { procMount: procMount } }, + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem.\\nDefault is false.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + withReadOnlyRootFilesystem(readOnlyRootFilesystem): { securityContext+: { readOnlyRootFilesystem: readOnlyRootFilesystem } }, + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + withRunAsGroup(runAsGroup): { securityContext+: { runAsGroup: runAsGroup } }, + '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user.\\nIf true, the Kubelet will validate the image at runtime to ensure that it\\ndoes not run as UID 0 (root) and fail to start the container if it does.\\nIf unset or false, no such validation will be performed.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), + withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } }, + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + withRunAsUser(runAsUser): { securityContext+: { runAsUser: runAsUser } }, + }, + '#startupProbe':: d.obj(help="\"StartupProbe indicates that the Pod has successfully initialized.\\nIf specified, no other probes are executed until this completes successfully.\\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\\nThis cannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\""), + startupProbe: { + '#exec':: d.obj(help='"Exec specifies the action to take."'), + exec: { + '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { startupProbe+: { exec+: { command: if std.isArray(v=command) then command else [command] } } }, + '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the\\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\\na shell, you need to explicitly call out to that shell.\\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { startupProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, + }, + '#grpc':: d.obj(help='"GRPC specifies an action involving a GRPC port."'), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. 
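Because each setter above returns a `{ securityContext+: ... }` fragment, a hardened container security context is simply a sum of calls. A sketch under the same stand-in convention (the local `sc` copies the generated definitions so the snippet evaluates on its own):

// Illustrative only: `sc` mirrors the container securityContext helpers above.
local sc = {
  capabilities: {
    withDrop(drop): { securityContext+: { capabilities+: { drop: if std.isArray(v=drop) then drop else [drop] } } },
  },
  withAllowPrivilegeEscalation(allowPrivilegeEscalation): { securityContext+: { allowPrivilegeEscalation: allowPrivilegeEscalation } },
  withReadOnlyRootFilesystem(readOnlyRootFilesystem): { securityContext+: { readOnlyRootFilesystem: readOnlyRootFilesystem } },
  withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } },
};

sc.withRunAsNonRoot(true)
+ sc.withReadOnlyRootFilesystem(true)
+ sc.withAllowPrivilegeEscalation(false)
+ sc.capabilities.withDrop(['ALL'])
// -> { securityContext: { runAsNonRoot: true, readOnlyRootFilesystem: true,
//      allowPrivilegeEscalation: false, capabilities: { drop: ['ALL'] } } }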
Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { startupProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { startupProbe+: { grpc+: { service: service } } }, + }, + '#httpGet':: d.obj(help='"HTTPGet specifies the http request to perform."'), + httpGet: { + '#httpHeaders':: d.obj(help='"Custom headers to set in the request. HTTP allows repeated headers."'), + httpHeaders: { + '#withName':: d.fn(help='"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set\\n\\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { startupProbe+: { httpGet+: { host: host } } }, + '#withHttpHeaders':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeaders(httpHeaders): { startupProbe+: { httpGet+: { httpHeaders: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withHttpHeadersMixin':: d.fn(help='"Custom headers to set in the request. HTTP allows repeated headers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='httpHeaders', type=d.T.array)]), + withHttpHeadersMixin(httpHeaders): { startupProbe+: { httpGet+: { httpHeaders+: if std.isArray(v=httpHeaders) then httpHeaders else [httpHeaders] } } }, + '#withPath':: d.fn(help='"Path to access on the HTTP server."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { startupProbe+: { httpGet+: { path: path } } }, + '#withPort':: d.fn(help='"Name or number of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { startupProbe+: { httpGet+: { port: port } } }, + '#withScheme':: d.fn(help='"Scheme to use for connecting to the host.\\nDefaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), + withScheme(scheme): { startupProbe+: { httpGet+: { scheme: scheme } } }, + }, + '#tcpSocket':: d.obj(help='"TCPSocket specifies an action involving a TCP port."'), + tcpSocket: { + '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), + withHost(host): { startupProbe+: { tcpSocket+: { host: host } } }, + '#withPort':: d.fn(help='"Number or name of the port to access on the container.\\nNumber must be in the range 1 to 65535.\\nName must be an IANA_SVC_NAME."', args=[d.arg(name='port', type=d.T.any)]), + withPort(port): { startupProbe+: { tcpSocket+: { port: port } } }, + }, + '#withFailureThreshold':: d.fn(help='"Minimum consecutive failures for the probe to be considered failed after having succeeded.\\nDefaults to 3. 
Minimum value is 1."', args=[d.arg(name='failureThreshold', type=d.T.integer)]), + withFailureThreshold(failureThreshold): { startupProbe+: { failureThreshold: failureThreshold } }, + '#withInitialDelaySeconds':: d.fn(help='"Number of seconds after the container has started before liveness probes are initiated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='initialDelaySeconds', type=d.T.integer)]), + withInitialDelaySeconds(initialDelaySeconds): { startupProbe+: { initialDelaySeconds: initialDelaySeconds } }, + '#withPeriodSeconds':: d.fn(help='"How often (in seconds) to perform the probe.\\nDefault to 10 seconds. Minimum value is 1."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + withPeriodSeconds(periodSeconds): { startupProbe+: { periodSeconds: periodSeconds } }, + '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed.\\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), + withSuccessThreshold(successThreshold): { startupProbe+: { successThreshold: successThreshold } }, + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\\nvalue overrides the value provided by the pod spec.\\nValue must be non-negative integer. The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { startupProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, + '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out.\\nDefaults to 1 second. Minimum value is 1.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), + withTimeoutSeconds(timeoutSeconds): { startupProbe+: { timeoutSeconds: timeoutSeconds } }, + }, + '#volumeDevices':: d.obj(help='"volumeDevices is the list of block devices to be used by the container."'), + volumeDevices: { + '#withDevicePath':: d.fn(help='"devicePath is the path inside of the container that the device will be mapped to."', args=[d.arg(name='devicePath', type=d.T.string)]), + withDevicePath(devicePath): { devicePath: devicePath }, + '#withName':: d.fn(help='"name must match the name of a persistentVolumeClaim in the pod"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#volumeMounts':: d.obj(help="\"Pod volumes to mount into the container's filesystem.\\nCannot be updated.\""), + volumeMounts: { + '#withMountPath':: d.fn(help="\"Path within the container at which the volume should be mounted. 
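The startup probe setters follow the same pattern. For instance, an HTTP startup probe that tolerates a slow-starting container (roughly failureThreshold x periodSeconds seconds, per the help text above) could be sketched as follows, again with stand-ins copied from the definitions above:

// Illustrative only: `sp` mirrors the generated startupProbe helpers.
local sp = {
  httpGet: {
    withPath(path): { startupProbe+: { httpGet+: { path: path } } },
    withPort(port): { startupProbe+: { httpGet+: { port: port } } },
  },
  withFailureThreshold(failureThreshold): { startupProbe+: { failureThreshold: failureThreshold } },
  withPeriodSeconds(periodSeconds): { startupProbe+: { periodSeconds: periodSeconds } },
};

sp.httpGet.withPath('/healthz')
+ sp.httpGet.withPort(8080)
+ sp.withFailureThreshold(30)
+ sp.withPeriodSeconds(10)
// -> allows roughly 30 * 10 = 300 seconds for startup before the kubelet restarts the container.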
Must\\nnot contain ':'.\"", args=[d.arg(name='mountPath', type=d.T.string)]), + withMountPath(mountPath): { mountPath: mountPath }, + '#withMountPropagation':: d.fn(help='"mountPropagation determines how mounts are propagated from the host\\nto container and the other way around.\\nWhen not set, MountPropagationNone is used.\\nThis field is beta in 1.10.\\nWhen RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified\\n(which defaults to None)."', args=[d.arg(name='mountPropagation', type=d.T.string)]), + withMountPropagation(mountPropagation): { mountPropagation: mountPropagation }, + '#withName':: d.fn(help='"This must match the Name of a Volume."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withReadOnly':: d.fn(help='"Mounted read-only if true, read-write otherwise (false or unspecified).\\nDefaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withRecursiveReadOnly':: d.fn(help='"RecursiveReadOnly specifies whether read-only mounts should be handled\\nrecursively.\\n\\n\\nIf ReadOnly is false, this field has no meaning and must be unspecified.\\n\\n\\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made\\nrecursively read-only. If this field is set to IfPossible, the mount is made\\nrecursively read-only, if it is supported by the container runtime. If this\\nfield is set to Enabled, the mount is made recursively read-only if it is\\nsupported by the container runtime, otherwise the pod will not be started and\\nan error will be generated to indicate the reason.\\n\\n\\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to\\nNone (or be unspecified, which defaults to None).\\n\\n\\nIf this field is not specified, it is treated as an equivalent of Disabled."', args=[d.arg(name='recursiveReadOnly', type=d.T.string)]), + withRecursiveReadOnly(recursiveReadOnly): { recursiveReadOnly: recursiveReadOnly }, + '#withSubPath':: d.fn(help="\"Path within the volume from which the container's volume should be mounted.\\nDefaults to \\\"\\\" (volume's root).\"", args=[d.arg(name='subPath', type=d.T.string)]), + withSubPath(subPath): { subPath: subPath }, + '#withSubPathExpr':: d.fn(help="\"Expanded path within the volume from which the container's volume should be mounted.\\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\\nDefaults to \\\"\\\" (volume's root).\\nSubPathExpr and SubPath are mutually exclusive.\"", args=[d.arg(name='subPathExpr', type=d.T.string)]), + withSubPathExpr(subPathExpr): { subPathExpr: subPathExpr }, + }, + '#withArgs':: d.fn(help="\"Arguments to the entrypoint.\\nThe container image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. 
Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), + withArgs(args): { args: if std.isArray(v=args) then args else [args] }, + '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint.\\nThe container image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), + withArgsMixin(args): { args+: if std.isArray(v=args) then args else [args] }, + '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell.\\nThe container image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), + withCommand(command): { command: if std.isArray(v=command) then command else [command] }, + '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell.\\nThe container image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + withCommandMixin(command): { command+: if std.isArray(v=command) then command else [command] }, + '#withEnv':: d.fn(help='"List of environment variables to set in the container.\\nCannot be updated."', args=[d.arg(name='env', type=d.T.array)]), + withEnv(env): { env: if std.isArray(v=env) then env else [env] }, + '#withEnvFrom':: d.fn(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. 
When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."', args=[d.arg(name='envFrom', type=d.T.array)]), + withEnvFrom(envFrom): { envFrom: if std.isArray(v=envFrom) then envFrom else [envFrom] }, + '#withEnvFromMixin':: d.fn(help='"List of sources to populate environment variables in the container.\\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\\nwill be reported as an event when the container is starting. When a key exists in multiple\\nsources, the value associated with the last source will take precedence.\\nValues defined by an Env with a duplicate key will take precedence.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='envFrom', type=d.T.array)]), + withEnvFromMixin(envFrom): { envFrom+: if std.isArray(v=envFrom) then envFrom else [envFrom] }, + '#withEnvMixin':: d.fn(help='"List of environment variables to set in the container.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='env', type=d.T.array)]), + withEnvMixin(env): { env+: if std.isArray(v=env) then env else [env] }, + '#withImage':: d.fn(help='"Container image name.\\nMore info: https://kubernetes.io/docs/concepts/containers/images\\nThis field is optional to allow higher level config management to default or override\\ncontainer images in workload controllers like Deployments and StatefulSets."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { image: image }, + '#withImagePullPolicy':: d.fn(help='"Image pull policy.\\nOne of Always, Never, IfNotPresent.\\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images"', args=[d.arg(name='imagePullPolicy', type=d.T.string)]), + withImagePullPolicy(imagePullPolicy): { imagePullPolicy: imagePullPolicy }, + '#withName':: d.fn(help='"Name of the container specified as a DNS_LABEL.\\nEach container in a pod must have a unique name (DNS_LABEL).\\nCannot be updated."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPorts':: d.fn(help='"List of ports to expose from the container. Not specifying a port here\\nDOES NOT prevent that port from being exposed. Any port which is\\nlistening on the default \\"0.0.0.0\\" address inside a container will be\\naccessible from the network.\\nModifying this array with strategic merge patch may corrupt the data.\\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\\nCannot be updated."', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, + '#withPortsMixin':: d.fn(help='"List of ports to expose from the container. Not specifying a port here\\nDOES NOT prevent that port from being exposed. 
Any port which is\\nlistening on the default \\"0.0.0.0\\" address inside a container will be\\naccessible from the network.\\nModifying this array with strategic merge patch may corrupt the data.\\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#withResizePolicy':: d.fn(help='"Resources resize policy for the container."', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicy(resizePolicy): { resizePolicy: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withResizePolicyMixin':: d.fn(help='"Resources resize policy for the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicyMixin(resizePolicy): { resizePolicy+: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withRestartPolicy':: d.fn(help="\"RestartPolicy defines the restart behavior of individual containers in a pod.\\nThis field may only be set for init containers, and the only allowed value is \\\"Always\\\".\\nFor non-init containers or when this field is not specified,\\nthe restart behavior is defined by the Pod's restart policy and the container type.\\nSetting the RestartPolicy as \\\"Always\\\" for the init container will have the following effect:\\nthis init container will be continually restarted on\\nexit until all regular containers have terminated. Once all regular\\ncontainers have completed, all init containers with restartPolicy \\\"Always\\\"\\nwill be shut down. This lifecycle differs from normal init containers and\\nis often referred to as a \\\"sidecar\\\" container. Although this init\\ncontainer still starts in the init container sequence, it does not wait\\nfor the container to complete before proceeding to the next init\\ncontainer. Instead, the next init container starts immediately after this\\ninit container is started, or after any startupProbe has successfully\\ncompleted.\"", args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, + '#withStdin':: d.fn(help='"Whether this container should allocate a buffer for stdin in the container runtime. If this\\nis not set, reads from stdin in the container will always result in EOF.\\nDefault is false."', args=[d.arg(name='stdin', type=d.T.boolean)]), + withStdin(stdin): { stdin: stdin }, + '#withStdinOnce':: d.fn(help='"Whether the container runtime should close the stdin channel after it has been opened by\\na single attach. When stdin is true the stdin stream will remain open across multiple attach\\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\\nat which time stdin is closed and remains closed until the container is restarted. 
If this\\nflag is false, a container processes that reads from stdin will never receive an EOF.\\nDefault is false"', args=[d.arg(name='stdinOnce', type=d.T.boolean)]), + withStdinOnce(stdinOnce): { stdinOnce: stdinOnce }, + '#withTerminationMessagePath':: d.fn(help="\"Optional: Path at which the file to which the container's termination message\\nwill be written is mounted into the container's filesystem.\\nMessage written is intended to be brief final status, such as an assertion failure message.\\nWill be truncated by the node if greater than 4096 bytes. The total message length across\\nall containers will be limited to 12kb.\\nDefaults to /dev/termination-log.\\nCannot be updated.\"", args=[d.arg(name='terminationMessagePath', type=d.T.string)]), + withTerminationMessagePath(terminationMessagePath): { terminationMessagePath: terminationMessagePath }, + '#withTerminationMessagePolicy':: d.fn(help='"Indicate how the termination message should be populated. File will use the contents of\\nterminationMessagePath to populate the container status message on both success and failure.\\nFallbackToLogsOnError will use the last chunk of container log output if the termination\\nmessage file is empty and the container exited with an error.\\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\\nDefaults to File.\\nCannot be updated."', args=[d.arg(name='terminationMessagePolicy', type=d.T.string)]), + withTerminationMessagePolicy(terminationMessagePolicy): { terminationMessagePolicy: terminationMessagePolicy }, + '#withTty':: d.fn(help="\"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\\nDefault is false.\"", args=[d.arg(name='tty', type=d.T.boolean)]), + withTty(tty): { tty: tty }, + '#withVolumeDevices':: d.fn(help='"volumeDevices is the list of block devices to be used by the container."', args=[d.arg(name='volumeDevices', type=d.T.array)]), + withVolumeDevices(volumeDevices): { volumeDevices: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, + '#withVolumeDevicesMixin':: d.fn(help='"volumeDevices is the list of block devices to be used by the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeDevices', type=d.T.array)]), + withVolumeDevicesMixin(volumeDevices): { volumeDevices+: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, + '#withVolumeMounts':: d.fn(help="\"Pod volumes to mount into the container's filesystem.\\nCannot be updated.\"", args=[d.arg(name='volumeMounts', type=d.T.array)]), + withVolumeMounts(volumeMounts): { volumeMounts: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, + '#withVolumeMountsMixin':: d.fn(help="\"Pod volumes to mount into the container's filesystem.\\nCannot be updated.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeMounts', type=d.T.array)]), + withVolumeMountsMixin(volumeMounts): { volumeMounts+: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, + '#withWorkingDir':: d.fn(help="\"Container's working directory.\\nIf not specified, the container runtime's default will be used, which\\nmight be configured in the container image.\\nCannot be updated.\"", args=[d.arg(name='workingDir', type=d.T.string)]), + withWorkingDir(workingDir): { workingDir: workingDir }, + }, + '#os':: d.obj(help='"Specifies the OS of the containers in the pod.\\nSome pod and container fields are restricted if this is 
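A recurring convention in the functions above: plain `with*` setters replace a list-valued field, while the matching `with*Mixin` variants append to it (this is what the repeated "**Note:** This function appends passed data to existing values" docstrings refer to). A small self-contained sketch of the difference, using stand-ins copied from the env setters defined above:

// Illustrative only: `c` mirrors the generated withEnv / withEnvMixin helpers.
local c = {
  withEnv(env): { env: if std.isArray(v=env) then env else [env] },
  withEnvMixin(env): { env+: if std.isArray(v=env) then env else [env] },
};

{
  // the second withEnv call overwrites the first list entirely
  replaced: c.withEnv([{ name: 'A', value: '1' }]) + c.withEnv([{ name: 'B', value: '2' }]),
  // withEnvMixin appends instead, keeping both entries
  appended: c.withEnv([{ name: 'A', value: '1' }]) + c.withEnvMixin([{ name: 'B', value: '2' }]),
}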
set.\\n\\n\\nIf the OS field is set to linux, the following fields must be unset:\\n-securityContext.windowsOptions\\n\\n\\nIf the OS field is set to windows, following fields must be unset:\\n- spec.hostPID\\n- spec.hostIPC\\n- spec.hostUsers\\n- spec.securityContext.appArmorProfile\\n- spec.securityContext.seLinuxOptions\\n- spec.securityContext.seccompProfile\\n- spec.securityContext.fsGroup\\n- spec.securityContext.fsGroupChangePolicy\\n- spec.securityContext.sysctls\\n- spec.shareProcessNamespace\\n- spec.securityContext.runAsUser\\n- spec.securityContext.runAsGroup\\n- spec.securityContext.supplementalGroups\\n- spec.containers[*].securityContext.appArmorProfile\\n- spec.containers[*].securityContext.seLinuxOptions\\n- spec.containers[*].securityContext.seccompProfile\\n- spec.containers[*].securityContext.capabilities\\n- spec.containers[*].securityContext.readOnlyRootFilesystem\\n- spec.containers[*].securityContext.privileged\\n- spec.containers[*].securityContext.allowPrivilegeEscalation\\n- spec.containers[*].securityContext.procMount\\n- spec.containers[*].securityContext.runAsUser\\n- spec.containers[*].securityContext.runAsGroup"'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows.\\nAdditional value may be defined in future and can be one of:\\nhttps://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration\\nClients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { os+: { name: name } } } } } } }, + }, + '#readinessGates':: d.obj(help='"If specified, all readiness gates will be evaluated for pod readiness.\\nA pod is ready when all its containers are ready AND\\nall conditions specified in the readiness gates have status equal to \\"True\\"\\nMore info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"'), + readinessGates: { + '#withConditionType':: d.fn(help="\"ConditionType refers to a condition in the pod's condition list with matching type.\"", args=[d.arg(name='conditionType', type=d.T.string)]), + withConditionType(conditionType): { conditionType: conditionType }, + }, + '#resourceClaims':: d.obj(help='"ResourceClaims defines which ResourceClaims must be allocated\\nand reserved before the Pod is allowed to start. The resources\\nwill be made available to those containers which consume them\\nby name.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable."'), + resourceClaims: { + '#source':: d.obj(help='"Source describes where to find the ResourceClaim."'), + source: { + '#withResourceClaimName':: d.fn(help='"ResourceClaimName is the name of a ResourceClaim object in the same\\nnamespace as this pod."', args=[d.arg(name='resourceClaimName', type=d.T.string)]), + withResourceClaimName(resourceClaimName): { source+: { resourceClaimName: resourceClaimName } }, + '#withResourceClaimTemplateName':: d.fn(help='"ResourceClaimTemplateName is the name of a ResourceClaimTemplate\\nobject in the same namespace as this pod.\\n\\n\\nThe template will be used to create a new ResourceClaim, which will\\nbe bound to this pod. When this pod is deleted, the ResourceClaim\\nwill also be deleted. 
The pod name and resource name, along with a\\ngenerated component, will be used to form a unique name for the\\nResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\\n\\n\\nThis field is immutable and no changes will be made to the\\ncorresponding ResourceClaim by the control plane after creating the\\nResourceClaim."', args=[d.arg(name='resourceClaimTemplateName', type=d.T.string)]), + withResourceClaimTemplateName(resourceClaimTemplateName): { source+: { resourceClaimTemplateName: resourceClaimTemplateName } }, + }, + '#withName':: d.fn(help='"Name uniquely identifies this resource claim inside the pod.\\nThis must be a DNS_LABEL."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#schedulingGates':: d.obj(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\\nIf schedulingGates is not empty, the pod will stay in the SchedulingGated state and the\\nscheduler will not attempt to schedule the pod.\\n\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards."'), + schedulingGates: { + '#withName':: d.fn(help='"Name of the scheduling gate.\\nEach scheduling gate must have a unique name field."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#securityContext':: d.obj(help='"SecurityContext holds pod-level security attributes and common container settings.\\nOptional: Defaults to empty. See type description for default values of each field."'), + securityContext: { + '#appArmorProfile':: d.obj(help='"appArmorProfile is the AppArmor options to use by the containers in this pod.\\nNote that this field cannot be set when spec.os.name is windows."'), + appArmorProfile: { + '#withLocalhostProfile':: d.fn(help='"localhostProfile indicates a profile loaded on the node that should be used.\\nThe profile must be preconfigured on the node to work.\\nMust match the loaded name of the profile.\\nMust be set if and only if type is \\"Localhost\\"."', args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { appArmorProfile+: { localhostProfile: localhostProfile } } } } } } } }, + '#withType':: d.fn(help="\"type indicates which kind of AppArmor profile will be applied.\\nValid options are:\\n Localhost - a profile pre-loaded on the node.\\n RuntimeDefault - the container runtime's default profile.\\n Unconfined - no AppArmor enforcement.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { appArmorProfile+: { type: type } } } } } } } }, + }, + '#seLinuxOptions':: d.obj(help='"The SELinux context to be applied to all containers.\\nIf unspecified, the container runtime will allocate a random SELinux context for each\\ncontainer. May also be set in SecurityContext. 
If set in\\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\\ntakes precedence for that container.\\nNote that this field cannot be set when spec.os.name is windows."'), + seLinuxOptions: { + '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), + withLevel(level): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { level: level } } } } } } } }, + '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), + withRole(role): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { role: role } } } } } } } }, + '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { type: type } } } } } } } }, + '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { user: user } } } } } } } }, + }, + '#seccompProfile':: d.obj(help='"The seccomp options to use by the containers in this pod.\\nNote that this field cannot be set when spec.os.name is windows."'), + seccompProfile: { + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used.\\nThe profile must be preconfigured on the node to work.\\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\\nMust be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + withLocalhostProfile(localhostProfile): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } } } }, + '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied.\\nValid options are:\\n\\n\\nLocalhost - a profile defined in a file on the node should be used.\\nRuntimeDefault - the container runtime default profile should be used.\\nUnconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } } } }, + }, + '#sysctls':: d.obj(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported\\nsysctls (by the container runtime) might fail to launch.\\nNote that this field cannot be set when spec.os.name is windows."'), + sysctls: { + '#withName':: d.fn(help='"Name of a property to set"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withValue':: d.fn(help='"Value of a property to set"', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#windowsOptions':: d.obj(help="\"The Windows specific settings applied to all containers.\\nIf unspecified, the options within a container's SecurityContext will be used.\\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is linux.\""), + windowsOptions: { + '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook\\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\\nGMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), + withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } } } }, + '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), + withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container.\\nAll of a Pod's containers must have the same effective HostProcess value\\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\\nIn addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } } } }, + '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process.\\nDefaults to the user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), + withRunAsUserName(runAsUserName): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } } } }, + }, + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod.\\nSome volume types allow the Kubelet to change the ownership of that volume\\nto be owned by the pod:\\n\\n\\n1. The owning GID will be the FSGroup\\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\\n3. 
The permission bits are OR'd with rw-rw----\\n\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\\nNote that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + withFsGroup(fsGroup): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } } }, + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume\\nbefore being exposed inside Pod. This field will only apply to\\nvolume types which support fsGroup based ownership(and permissions).\\nIt will have no effect on ephemeral volume types such as: secret, configmaps\\nand emptydir.\\nValid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } } } }, + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in SecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence\\nfor that container.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + withRunAsGroup(runAsGroup): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } } } }, + '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user.\\nIf true, the Kubelet will validate the image at runtime to ensure that it\\ndoes not run as UID 0 (root) and fail to start the container if it does.\\nIf unset or false, no such validation will be performed.\\nMay also be set in SecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), + withRunAsNonRoot(runAsNonRoot): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } } }, + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in SecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence\\nfor that container.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + withRunAsUser(runAsUser): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } } }, + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition\\nto the container's primary GID, the fsGroup (if specified), and group memberships\\ndefined in the container image for the uid of the container process. If unspecified,\\nno additional groups are added to any container. 
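Unlike the container-scoped setters, the pod-level securityContext functions here return fragments rooted at the CRD's spec.deploymentTemplate.spec.template.spec path, so they can be added directly onto the top-level object and all merge into a single nested securityContext. A sketch with stand-ins mirroring the definitions above:

// Illustrative only: `podSc` mirrors the pod-level securityContext helpers above.
local podSc = {
  withFsGroup(fsGroup): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } } },
  withRunAsNonRoot(runAsNonRoot): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } } },
  withRunAsUser(runAsUser): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } } },
};

podSc.withFsGroup(2000) + podSc.withRunAsNonRoot(true) + podSc.withRunAsUser(1000)
// -> all three fragments merge into one spec.deploymentTemplate.spec.template.spec.securityContext object.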
Note that group memberships\\ndefined in the container image for the uid of the container process are still effective,\\neven if they are not included in this list.\\nNote that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + withSupplementalGroups(supplementalGroups): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition\\nto the container's primary GID, the fsGroup (if specified), and group memberships\\ndefined in the container image for the uid of the container process. If unspecified,\\nno additional groups are added to any container. Note that group memberships\\ndefined in the container image for the uid of the container process are still effective,\\neven if they are not included in this list.\\nNote that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + withSupplementalGroupsMixin(supplementalGroups): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\\nsysctls (by the container runtime) might fail to launch.\\nNote that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), + withSysctls(sysctls): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\\nsysctls (by the container runtime) might fail to launch.\\nNote that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + withSysctlsMixin(sysctls): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, + }, + '#tolerations':: d.obj(help="\"If specified, the pod's tolerations.\""), + tolerations: { + '#withEffect':: d.fn(help='"Effect indicates the taint effect to match. Empty means match all taint effects.\\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute."', args=[d.arg(name='effect', type=d.T.string)]), + withEffect(effect): { effect: effect }, + '#withKey':: d.fn(help='"Key is the taint key that the toleration applies to. Empty means match all taint keys.\\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"Operator represents a key's relationship to the value.\\nValid operators are Exists and Equal. 
Defaults to Equal.\\nExists is equivalent to wildcard for value, so that a pod can\\ntolerate all taints of a particular category.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withTolerationSeconds':: d.fn(help='"TolerationSeconds represents the period of time the toleration (which must be\\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\\nit is not set, which means tolerate the taint forever (do not evict). Zero and\\nnegative values will be treated as 0 (evict immediately) by the system."', args=[d.arg(name='tolerationSeconds', type=d.T.integer)]), + withTolerationSeconds(tolerationSeconds): { tolerationSeconds: tolerationSeconds }, + '#withValue':: d.fn(help='"Value is the taint value the toleration matches to.\\nIf the operator is Exists, the value should be empty, otherwise just a regular string."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#topologySpreadConstraints':: d.obj(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology\\ndomains. Scheduler will schedule pods in a way which abides by the constraints.\\nAll topologySpreadConstraints are ANDed."'), + topologySpreadConstraints: { + '#labelSelector':: d.obj(help='"LabelSelector is used to find matching pods.\\nPods that match this label selector are counted to determine the number of pods\\nin their corresponding topology domain."'), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
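The toleration helpers return flat fragments meant to be assembled into list items. A single toleration entry could be built as below; the surrounding pod-level setter that accepts the resulting array is not shown in this part of the diff and is assumed to exist elsewhere in the library:

// Illustrative only: `tol` mirrors the toleration helpers above.
local tol = {
  withKey(key): { key: key },
  withOperator(operator): { operator: operator },
  withEffect(effect): { effect: effect },
  withTolerationSeconds(tolerationSeconds): { tolerationSeconds: tolerationSeconds },
};

[
  tol.withKey('node.kubernetes.io/not-ready')
  + tol.withOperator('Exists')
  + tol.withEffect('NoExecute')
  + tol.withTolerationSeconds(300),
]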
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select the pods over which\\nspreading will be calculated. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are ANDed with labelSelector\\nto select the group of existing pods over which spreading will be calculated\\nfor the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\\nMatchLabelKeys cannot be set when LabelSelector isn't set.\\nKeys that don't exist in the incoming pod labels will\\nbe ignored. A null or empty list means only match against labelSelector.\\n\\n\\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select the pods over which\\nspreading will be calculated. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are ANDed with labelSelector\\nto select the group of existing pods over which spreading will be calculated\\nfor the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\\nMatchLabelKeys cannot be set when LabelSelector isn't set.\\nKeys that don't exist in the incoming pod labels will\\nbe ignored. 
A null or empty list means only match against labelSelector.\\n\\n\\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMaxSkew':: d.fn(help="\"MaxSkew describes the degree to which pods may be unevenly distributed.\\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\\nbetween the number of matching pods in the target topology and the global minimum.\\nThe global minimum is the minimum number of matching pods in an eligible domain\\nor zero if the number of eligible domains is less than MinDomains.\\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\\nlabelSelector spread as 2/2/1:\\nIn this case, the global minimum is 1.\\n| zone1 | zone2 | zone3 |\\n| P P | P P | P |\\n- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;\\nscheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)\\nviolate MaxSkew(1).\\n- if MaxSkew is 2, incoming pod can be scheduled onto any zone.\\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\\nto topologies that satisfy it.\\nIt's a required field. Default value is 1 and 0 is not allowed.\"", args=[d.arg(name='maxSkew', type=d.T.integer)]), + withMaxSkew(maxSkew): { maxSkew: maxSkew }, + '#withMinDomains':: d.fn(help="\"MinDomains indicates a minimum number of eligible domains.\\nWhen the number of eligible domains with matching topology keys is less than minDomains,\\nPod Topology Spread treats \\\"global minimum\\\" as 0, and then the calculation of Skew is performed.\\nAnd when the number of eligible domains with matching topology keys equals or greater than minDomains,\\nthis value has no effect on scheduling.\\nAs a result, when the number of eligible domains is less than minDomains,\\nscheduler won't schedule more than maxSkew Pods to those domains.\\nIf value is nil, the constraint behaves as if MinDomains is equal to 1.\\nValid values are integers greater than 0.\\nWhen value is not nil, WhenUnsatisfiable must be DoNotSchedule.\\n\\n\\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same\\nlabelSelector spread as 2/2/2:\\n| zone1 | zone2 | zone3 |\\n| P P | P P | P P |\\nThe number of domains is less than 5(MinDomains), so \\\"global minimum\\\" is treated as 0.\\nIn this situation, new pod with the same labelSelector cannot be scheduled,\\nbecause computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,\\nit will violate MaxSkew.\"", args=[d.arg(name='minDomains', type=d.T.integer)]), + withMinDomains(minDomains): { minDomains: minDomains }, + '#withNodeAffinityPolicy':: d.fn(help="\"NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector\\nwhen calculating pod topology spread skew. Options are:\\n- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.\\n- Ignore: nodeAffinity/nodeSelector are ignored. 
All nodes are included in the calculations.\\n\\n\\nIf this value is nil, the behavior is equivalent to the Honor policy.\\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.\"", args=[d.arg(name='nodeAffinityPolicy', type=d.T.string)]), + withNodeAffinityPolicy(nodeAffinityPolicy): { nodeAffinityPolicy: nodeAffinityPolicy }, + '#withNodeTaintsPolicy':: d.fn(help='"NodeTaintsPolicy indicates how we will treat node taints when calculating\\npod topology spread skew. Options are:\\n- Honor: nodes without taints, along with tainted nodes for which the incoming pod\\nhas a toleration, are included.\\n- Ignore: node taints are ignored. All nodes are included.\\n\\n\\nIf this value is nil, the behavior is equivalent to the Ignore policy.\\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."', args=[d.arg(name='nodeTaintsPolicy', type=d.T.string)]), + withNodeTaintsPolicy(nodeTaintsPolicy): { nodeTaintsPolicy: nodeTaintsPolicy }, + '#withTopologyKey':: d.fn(help="\"TopologyKey is the key of node labels. Nodes that have a label with this key\\nand identical values are considered to be in the same topology.\\nWe consider each \u003ckey, value\u003e as a \\\"bucket\\\", and try to put balanced number\\nof pods into each bucket.\\nWe define a domain as a particular instance of a topology.\\nAlso, we define an eligible domain as a domain whose nodes meet the requirements of\\nnodeAffinityPolicy and nodeTaintsPolicy.\\ne.g. If TopologyKey is \\\"kubernetes.io/hostname\\\", each Node is a domain of that topology.\\nAnd, if TopologyKey is \\\"topology.kubernetes.io/zone\\\", each zone is a domain of that topology.\\nIt's a required field.\"", args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { topologyKey: topologyKey }, + '#withWhenUnsatisfiable':: d.fn(help="\"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy\\nthe spread constraint.\\n- DoNotSchedule (default) tells the scheduler not to schedule it.\\n- ScheduleAnyway tells the scheduler to schedule the pod in any location,\\n but giving higher precedence to topologies that would help reduce the\\n skew.\\nA constraint is considered \\\"Unsatisfiable\\\" for an incoming pod\\nif and only if every possible node assignment for that pod would violate\\n\\\"MaxSkew\\\" on some topology.\\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\\nlabelSelector spread as 3/1/1:\\n| zone1 | zone2 | zone3 |\\n| P P P | P | P |\\nIf WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled\\nto zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies\\nMaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler\\nwon't make it *more* imbalanced.\\nIt's a required field.\"", args=[d.arg(name='whenUnsatisfiable', type=d.T.string)]), + withWhenUnsatisfiable(whenUnsatisfiable): { whenUnsatisfiable: whenUnsatisfiable }, + }, + '#volumes':: d.obj(help='"List of volumes that can be mounted by containers belonging to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes"'), + volumes: { + '#awsElasticBlockStore':: d.obj(help="\"awsElasticBlockStore represents an AWS Disk resource that is attached to a\\nkubelet's host machine and then exposed to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\""), + awsElasticBlockStore: { + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { awsElasticBlockStore+: { fsType: fsType } }, + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount.\\nIf omitted, the default is to mount by volume name.\\nExamples: For volume /dev/sda1, you specify the partition as \\"1\\".\\nSimilarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { awsElasticBlockStore+: { partition: partition } }, + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { awsElasticBlockStore+: { readOnly: readOnly } }, + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + withVolumeID(volumeID): { awsElasticBlockStore+: { volumeID: volumeID } }, + }, + '#azureDisk':: d.obj(help='"azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), + azureDisk: { + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + withCachingMode(cachingMode): { azureDisk+: { cachingMode: cachingMode } }, + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + withDiskName(diskName): { azureDisk+: { diskName: diskName } }, + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + withDiskURI(diskURI): { azureDisk+: { diskURI: diskURI } }, + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { azureDisk+: { fsType: fsType } }, + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { azureDisk+: { kind: kind } }, + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { azureDisk+: { readOnly: readOnly } }, + }, + '#azureFile':: d.obj(help='"azureFile represents an Azure File Service mount on the host and bind mount to the pod."'), + azureFile: { + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { azureFile+: { readOnly: readOnly } }, + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { azureFile+: { secretName: secretName } }, + '#withShareName':: d.fn(help='"shareName is the azure share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + withShareName(shareName): { azureFile+: { shareName: shareName } }, + }, + '#cephfs':: d.obj(help="\"cephFS represents a Ceph FS mount on the host that shares a pod's lifetime\""), + cephfs: { + '#secretRef':: d.obj(help='"secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { cephfs+: { secretRef+: { name: name } } }, + }, + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { cephfs+: { path: path } }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { cephfs+: { readOnly: readOnly } }, + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + withSecretFile(secretFile): { cephfs+: { secretFile: secretFile } }, + '#withUser':: d.fn(help='"user is optional: User is the rados user name, default is admin\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { cephfs+: { user: user } }, + }, + '#cinder':: d.obj(help='"cinder represents a cinder volume attached and mounted on kubelets host machine.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md"'), + cinder: { + '#secretRef':: d.obj(help='"secretRef is optional: points to a secret object containing parameters used to connect\\nto OpenStack."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { cinder+: { secretRef+: { name: name } } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { cinder+: { fsType: fsType } }, + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { cinder+: { readOnly: readOnly } }, + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + withVolumeID(volumeID): { cinder+: { volumeID: volumeID } }, + }, + '#configMap':: d.obj(help='"configMap represents a configMap that should populate this volume"'), + configMap: { + '#items':: d.obj(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\""), + items: { + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to.\\nMay not be an absolute path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withDefaultMode':: d.fn(help='"defaultMode is optional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDefaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { configMap+: { defaultMode: defaultMode } }, + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMap+: { name: name } }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMap+: { optional: optional } }, + }, + '#csi':: d.obj(help='"csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature)."'), + csi: { + '#nodePublishSecretRef':: d.obj(help='"nodePublishSecretRef is a reference to the secret object containing\\nsensitive information to pass to the CSI driver to complete the CSI\\nNodePublishVolume and NodeUnpublishVolume calls.\\nThis field is optional, and may be empty if no secret is required. If the\\nsecret object contains more than one secret, all secret references are passed."'), + nodePublishSecretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { csi+: { nodePublishSecretRef+: { name: name } } }, + }, + '#withDriver':: d.fn(help='"driver is the name of the CSI driver that handles this volume.\\nConsult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), + withDriver(driver): { csi+: { driver: driver } }, + '#withFsType':: d.fn(help='"fsType to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\".\\nIf not provided, the empty value is passed to the associated CSI driver\\nwhich will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { csi+: { fsType: fsType } }, + '#withReadOnly':: d.fn(help='"readOnly specifies a read-only configuration for the volume.\\nDefaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { csi+: { readOnly: readOnly } }, + '#withVolumeAttributes':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI\\ndriver. Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + withVolumeAttributes(volumeAttributes): { csi+: { volumeAttributes: volumeAttributes } }, + '#withVolumeAttributesMixin':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI\\ndriver. 
Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + withVolumeAttributesMixin(volumeAttributes): { csi+: { volumeAttributes+: volumeAttributes } }, + }, + '#downwardAPI':: d.obj(help='"downwardAPI represents downward API about the pod that should populate this volume"'), + downwardAPI: { + '#items':: d.obj(help='"Items is a list of downward API volume file"'), + items: { + '#fieldRef':: d.obj(help='"Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported."'), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { fieldRef+: { apiVersion: apiVersion } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldRef+: { fieldPath: fieldPath } }, + }, + '#resourceFieldRef':: d.obj(help='"Selects a resource of the container: only resources limits and requests\\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported."'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, + '#withDivisor':: d.fn(help='"Specifies the output format of the exposed resources, defaults to \\"1\\', args=[d.arg(name='divisor', type=d.T.any)]), + withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resourceFieldRef+: { resource: resource } }, + }, + '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file, must be an octal value\\nbetween 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withDefaultMode':: d.fn(help='"Optional: mode bits to use on created files by default. 
Must be a\\nOptional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDefaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { downwardAPI+: { defaultMode: defaultMode } }, + '#withItems':: d.fn(help='"Items is a list of downward API volume file"', args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { downwardAPI+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help='"Items is a list of downward API volume file"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { downwardAPI+: { items+: if std.isArray(v=items) then items else [items] } }, + }, + '#emptyDir':: d.obj(help="\"emptyDir represents a temporary directory that shares a pod's lifetime.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\""), + emptyDir: { + '#withMedium':: d.fn(help="\"medium represents what type of storage medium should back this directory.\\nThe default is \\\"\\\" which means to use the node's default medium.\\nMust be an empty string (default) or Memory.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), + withMedium(medium): { emptyDir+: { medium: medium } }, + '#withSizeLimit':: d.fn(help='"sizeLimit is the total amount of local storage required for this EmptyDir volume.\\nThe size limit is also applicable for memory medium.\\nThe maximum usage on memory medium EmptyDir would be the minimum value between\\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\\nThe default is nil which means that the limit is undefined.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir"', args=[d.arg(name='sizeLimit', type=d.T.any)]), + withSizeLimit(sizeLimit): { emptyDir+: { sizeLimit: sizeLimit } }, + }, + '#ephemeral':: d.obj(help="\"ephemeral represents a volume that is handled by a cluster storage driver.\\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\\nand deleted when the pod is removed.\\n\\n\\nUse this if:\\na) the volume is only needed while the pod runs,\\nb) features of normal volumes like restoring from snapshot or capacity\\n tracking are needed,\\nc) the storage driver is specified through a storage class, and\\nd) the storage driver supports dynamic volume provisioning through\\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\\n information on the connection between this volume type\\n and PersistentVolumeClaim).\\n\\n\\nUse PersistentVolumeClaim or one of the vendor-specific\\nAPIs for volumes that persist for longer than the lifecycle\\nof an individual pod.\\n\\n\\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\\nbe used that way - see the documentation of the driver for\\nmore information.\\n\\n\\nA pod can use both types of ephemeral volumes and\\npersistent volumes at the same time.\""), + ephemeral: { + '#volumeClaimTemplate':: d.obj(help='"Will be used to create a stand-alone PVC 
to provision the volume.\\nThe pod in which this EphemeralVolumeSource is embedded will be the\\nowner of the PVC, i.e. the PVC will be deleted together with the\\npod. The name of the PVC will be `-` where\\n`` is the name from the `PodSpec.Volumes` array\\nentry. Pod validation will reject the pod if the concatenated name\\nis not valid for a PVC (for example, too long).\\n\\n\\nAn existing PVC with that name that is not owned by the pod\\nwill *not* be used for the pod to avoid using an unrelated\\nvolume by mistake. Starting the pod is then blocked until\\nthe unrelated PVC is removed. If such a pre-created PVC is\\nmeant to be used by the pod, the PVC has to updated with an\\nowner reference to the pod once the pod exists. Normally\\nthis should not be necessary, but it may be useful when\\nmanually reconstructing a broken cluster.\\n\\n\\nThis field is read-only and no changes will be made by Kubernetes\\nto the PVC after it has been created.\\n\\n\\nRequired, must not be nil."'), + volumeClaimTemplate: { + '#metadata':: d.obj(help='"May contain labels and annotations that will be copied into the PVC\\nwhen creating it. No other fields are allowed and will be rejected during\\nvalidation."'), + metadata: { + '#withAnnotations':: d.fn(help='', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { ephemeral+: { volumeClaimTemplate+: { metadata+: { annotations: annotations } } } }, + '#withAnnotationsMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { ephemeral+: { volumeClaimTemplate+: { metadata+: { annotations+: annotations } } } }, + '#withFinalizers':: d.fn(help='', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { ephemeral+: { volumeClaimTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, + '#withFinalizersMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { ephemeral+: { volumeClaimTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, + '#withLabels':: d.fn(help='', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { ephemeral+: { volumeClaimTemplate+: { metadata+: { labels: labels } } } }, + '#withLabelsMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { ephemeral+: { volumeClaimTemplate+: { metadata+: { labels+: labels } } } }, + '#withName':: d.fn(help='', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ephemeral+: { volumeClaimTemplate+: { metadata+: { name: name } } } }, + '#withNamespace':: d.fn(help='', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { ephemeral+: { volumeClaimTemplate+: { metadata+: { namespace: namespace } } } }, + }, + '#spec':: d.obj(help='"The specification for the PersistentVolumeClaim. The entire content is\\ncopied unchanged into the PVC that gets created from this\\ntemplate. 
The same fields as in a PersistentVolumeClaim\\nare also valid here."'), + spec: { + '#dataSource':: d.obj(help='"dataSource field can be used to specify either:\\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\\n* An existing PVC (PersistentVolumeClaim)\\nIf the provisioner or an external controller can support the specified data source,\\nit will create a new volume based on the contents of the specified data source.\\nWhen the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,\\nand dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.\\nIf the namespace is specified, then dataSourceRef will not be copied to dataSource."'), + dataSource: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced.\\nIf APIGroup is not specified, the specified Kind must be in the core API group.\\nFor any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSource+: { apiGroup: apiGroup } } } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSource+: { kind: kind } } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSource+: { name: name } } } } }, + }, + '#dataSourceRef':: d.obj(help="\"dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\\nvolume is desired. This may be any object from a non-empty API group (non\\ncore object) or a PersistentVolumeClaim object.\\nWhen this field is specified, volume binding will only succeed if the type of\\nthe specified object matches some installed volume populator or dynamic\\nprovisioner.\\nThis field will replace the functionality of the dataSource field and as such\\nif both fields are non-empty, they must have the same value. 
For backwards\\ncompatibility, when namespace isn't specified in dataSourceRef,\\nboth fields (dataSource and dataSourceRef) will be set to the same\\nvalue automatically if one of them is empty and the other is non-empty.\\nWhen namespace is specified in dataSourceRef,\\ndataSource isn't set to the same value and must be empty.\\nThere are three important differences between dataSource and dataSourceRef:\\n* While dataSource only allows two specific types of objects, dataSourceRef\\n allows any non-core object, as well as PersistentVolumeClaim objects.\\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\\n preserves all values, and generates an error if a disallowed value is\\n specified.\\n* While dataSource only allows local objects, dataSourceRef allows objects\\n in any namespaces.\\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\\n(Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\""), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced.\\nIf APIGroup is not specified, the specified Kind must be in the core API group.\\nFor any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { apiGroup: apiGroup } } } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { kind: kind } } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { name: name } } } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced\\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. 
See the ReferenceGrant documentation for details.\\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { namespace: namespace } } } } }, + }, + '#resources':: d.obj(help='"resources represents the minimum resources the volume should have.\\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\\nthat are lower than previous value but must still be higher than capacity recorded in the\\nstatus field of the claim.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources"'), + resources: { + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { limits: limits } } } } }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { limits+: limits } } } } }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { requests: requests } } } } }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value. Requests cannot exceed Limits.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { requests+: requests } } } } }, + }, + '#selector':: d.obj(help='"selector is a label query over volumes to consider for binding."'), + selector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. 
If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchLabels: matchLabels } } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } } }, + }, + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + withAccessModes(accessModes): { ephemeral+: { volumeClaimTemplate+: { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + withAccessModesMixin(accessModes): { ephemeral+: { volumeClaimTemplate+: { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + withStorageClassName(storageClassName): { ephemeral+: { volumeClaimTemplate+: { spec+: { storageClassName: storageClassName } } } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.\\nIf specified, the CSI driver will create or update the volume with the attributes defined\\nin the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,\\nit can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass\\nwill be applied to the claim but it's not allowed to reset this field to empty string once it is set.\\nIf unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass\\nwill be set by the persistentvolume controller if it exists.\\nIf the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be\\nset to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource\\nexists.\\nMore info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/\\n(Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeAttributesClassName: volumeAttributesClassName } } } }, + '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim.\\nValue of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), + withVolumeMode(volumeMode): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeMode: volumeMode } } } }, + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeName: volumeName } } } }, + }, + }, + }, + '#fc':: d.obj(help="\"fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.\""), + fc: { + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified.\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fc+: { fsType: fsType } }, + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { fc+: { lun: lun } }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { fc+: { readOnly: readOnly } }, + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + withTargetWWNs(targetWWNs): { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + withTargetWWNsMixin(targetWWNs): { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids)\\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + withWwids(wwids): { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } }, + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids)\\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + withWwidsMixin(wwids): { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } }, + }, + '#flexVolume':: d.obj(help='"flexVolume represents a generic volume resource that is\\nprovisioned/attached using an exec based plugin."'), + flexVolume: { + '#secretRef':: d.obj(help='"secretRef is Optional: secretRef is reference to the secret object containing\\nsensitive information to pass to the plugin scripts. This may be\\nempty if no secret object is specified. If the secret object\\ncontains more than one secret, all secrets are passed to the plugin\\nscripts."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { flexVolume+: { secretRef+: { name: name } } }, + }, + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + withDriver(driver): { flexVolume+: { driver: driver } }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { flexVolume+: { fsType: fsType } }, + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + withOptions(options): { flexVolume+: { options: options } }, + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + withOptionsMixin(options): { flexVolume+: { options+: options } }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { flexVolume+: { readOnly: readOnly } }, + }, + '#flocker':: d.obj(help="\"flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running\""), + flocker: { + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker\\nshould be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + withDatasetName(datasetName): { flocker+: { datasetName: datasetName } }, + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + withDatasetUUID(datasetUUID): { flocker+: { datasetUUID: datasetUUID } }, + }, + '#gcePersistentDisk':: d.obj(help="\"gcePersistentDisk represents a GCE Disk resource that is attached to a\\nkubelet's host machine and then exposed to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\""), + gcePersistentDisk: { + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { gcePersistentDisk+: { fsType: fsType } }, + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount.\\nIf omitted, the default is to mount by volume name.\\nExamples: For volume /dev/sda1, you specify the partition as \\"1\\".\\nSimilarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty).\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { gcePersistentDisk+: { partition: partition } }, + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + withPdName(pdName): { gcePersistentDisk+: { pdName: pdName } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { gcePersistentDisk+: { readOnly: readOnly } }, + }, + '#gitRepo':: d.obj(help="\"gitRepo represents a git repository at a particular revision.\\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\\ninto the Pod's container.\""), + gitRepo: { + '#withDirectory':: d.fn(help="\"directory is the target directory name.\\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\\ngit repository. 
Otherwise, if specified, the volume will contain the git repository in\\nthe subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), + withDirectory(directory): { gitRepo+: { directory: directory } }, + '#withRepository':: d.fn(help='"repository is the URL"', args=[d.arg(name='repository', type=d.T.string)]), + withRepository(repository): { gitRepo+: { repository: repository } }, + '#withRevision':: d.fn(help='"revision is the commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), + withRevision(revision): { gitRepo+: { revision: revision } }, + }, + '#glusterfs':: d.obj(help="\"glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md\""), + glusterfs: { + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + withEndpoints(endpoints): { glusterfs+: { endpoints: endpoints } }, + '#withPath':: d.fn(help='"path is the Glusterfs volume path.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { glusterfs+: { path: path } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\\nDefaults to false.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { glusterfs+: { readOnly: readOnly } }, + }, + '#hostPath':: d.obj(help='"hostPath represents a pre-existing file or directory on the host\\nmachine that is directly exposed to the container. This is generally\\nused for system agents or other privileged things that are allowed\\nto see the host machine. Most containers will NOT need this.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\\n---\\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\\nmount host directories as read/write."'), + hostPath: { + '#withPath':: d.fn(help='"path of the directory on the host.\\nIf the path is a symlink, it will follow the link to the real path.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { hostPath+: { path: path } }, + '#withType':: d.fn(help='"type for HostPath Volume\\nDefaults to \\"\\"\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { hostPath+: { type: type } }, + }, + '#iscsi':: d.obj(help="\"iscsi represents an ISCSI Disk resource that is attached to a\\nkubelet's host machine and then exposed to the pod.\\nMore info: https://examples.k8s.io/volumes/iscsi/README.md\""), + iscsi: { + '#secretRef':: d.obj(help='"secretRef is the CHAP Secret for iSCSI target and initiator authentication"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { iscsi+: { secretRef+: { name: name } } }, + }, + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + withChapAuthDiscovery(chapAuthDiscovery): { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } }, + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + withChapAuthSession(chapAuthSession): { iscsi+: { chapAuthSession: chapAuthSession } }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { iscsi+: { fsType: fsType } }, + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name.\\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\\n: will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + withInitiatorName(initiatorName): { iscsi+: { initiatorName: initiatorName } }, + '#withIqn':: d.fn(help='"iqn is the target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + withIqn(iqn): { iscsi+: { iqn: iqn } }, + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport.\\nDefaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + withIscsiInterface(iscsiInterface): { iscsi+: { iscsiInterface: iscsiInterface } }, + '#withLun':: d.fn(help='"lun represents iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { iscsi+: { lun: lun } }, + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + withPortals(portals): { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } }, + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + withPortalsMixin(portals): { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { iscsi+: { readOnly: readOnly } }, + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + withTargetPortal(targetPortal): { iscsi+: { targetPortal: targetPortal } }, + }, + '#nfs':: d.obj(help="\"nfs represents an NFS mount on the host that shares a pod's lifetime\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\""), + nfs: { + '#withPath':: d.fn(help='"path that is exported by the NFS server.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { nfs+: { path: path } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions.\\nDefaults to false.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { nfs+: { readOnly: readOnly } }, + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + withServer(server): { nfs+: { server: server } }, + }, + '#persistentVolumeClaim':: d.obj(help='"persistentVolumeClaimVolumeSource represents a reference to a\\nPersistentVolumeClaim in the same namespace.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"'), + persistentVolumeClaim: { + '#withClaimName':: d.fn(help='"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]), + withClaimName(claimName): { persistentVolumeClaim+: { claimName: claimName } }, + '#withReadOnly':: d.fn(help='"readOnly Will force the ReadOnly setting in VolumeMounts.\\nDefault false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { persistentVolumeClaim+: { readOnly: readOnly } }, + }, + '#photonPersistentDisk':: d.obj(help='"photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"'), + photonPersistentDisk: { + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { photonPersistentDisk+: { fsType: fsType } }, + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + withPdID(pdID): { photonPersistentDisk+: { pdID: pdID } }, + }, + '#portworxVolume':: d.obj(help='"portworxVolume represents a portworx volume attached and mounted on kubelets host machine"'), + portworxVolume: { + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { portworxVolume+: { fsType: fsType } }, + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { portworxVolume+: { readOnly: readOnly } }, + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + withVolumeID(volumeID): { portworxVolume+: { volumeID: volumeID } }, + }, + '#projected':: d.obj(help='"projected items for all in one resources secrets, configmaps, and downward API"'), + projected: { + '#sources':: d.obj(help='"sources is the list of volume projections"'), + sources: { + '#clusterTrustBundle':: d.obj(help='"ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field\\nof ClusterTrustBundle objects in an auto-updating file.\\n\\n\\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\\n\\n\\nClusterTrustBundle objects can either be selected by name, or by the\\ncombination of signer name and a label selector.\\n\\n\\nKubelet performs aggressive normalization of the PEM contents written\\ninto the pod filesystem. Esoteric PEM features such as inter-block\\ncomments and block headers are stripped. Certificates are deduplicated.\\nThe ordering of certificates within the file is arbitrary, and Kubelet\\nmay change the order over time."'), + clusterTrustBundle: { + '#labelSelector':: d.obj(help='"Select all ClusterTrustBundles that match this label selector. Only has\\neffect if signerName is set. Mutually-exclusive with name. If unset,\\ninterpreted as \\"match nothing\\". If set but empty, interpreted as \\"match\\neverything\\"."'), + labelSelector: { + '#matchExpressions':: d.obj(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."'), + matchExpressions: { + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values.\\nValid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn,\\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\\nthe values array must be empty. This array is replaced during a strategic\\nmerge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + }, + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { clusterTrustBundle+: { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { clusterTrustBundle+: { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { clusterTrustBundle+: { labelSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\\nmap is equivalent to an element of matchExpressions, whose key field is \\"key\\", the\\noperator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { clusterTrustBundle+: { labelSelector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"Select a single ClusterTrustBundle by object name. Mutually-exclusive\\nwith signerName and labelSelector."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { clusterTrustBundle+: { name: name } }, + '#withOptional':: d.fn(help="\"If true, don't block pod startup if the referenced ClusterTrustBundle(s)\\naren't available. If using name, then the named ClusterTrustBundle is\\nallowed not to exist. If using signerName, then the combination of\\nsignerName and labelSelector is allowed to match zero\\nClusterTrustBundles.\"", args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { clusterTrustBundle+: { optional: optional } }, + '#withPath':: d.fn(help='"Relative path from the volume root to write the bundle."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { clusterTrustBundle+: { path: path } }, + '#withSignerName':: d.fn(help='"Select all ClusterTrustBundles that match this signer name.\\nMutually-exclusive with name. The contents of all selected\\nClusterTrustBundles will be unified and deduplicated."', args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { clusterTrustBundle+: { signerName: signerName } }, + }, + '#configMap':: d.obj(help='"configMap information about the configMap data to project"'), + configMap: { + '#items':: d.obj(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. 
Paths must be\\nrelative and may not contain the '..' path or start with '..'.\""), + items: { + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to.\\nMay not be an absolute path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMap+: { name: name } }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMap+: { optional: optional } }, + }, + '#downwardAPI':: d.obj(help='"downwardAPI information about the downwardAPI data to project"'), + downwardAPI: { + '#items':: d.obj(help='"Items is a list of DownwardAPIVolume file"'), + items: { + '#fieldRef':: d.obj(help='"Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported."'), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { fieldRef+: { apiVersion: apiVersion } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldRef+: { fieldPath: fieldPath } }, + }, + '#resourceFieldRef':: d.obj(help='"Selects a resource of the container: only resources limits and requests\\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported."'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, + '#withDivisor':: d.fn(help='"Specifies the output format of the exposed resources, defaults to \\"1\\', args=[d.arg(name='divisor', type=d.T.any)]), + withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resourceFieldRef+: { resource: resource } }, + }, + '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file, must be an octal value\\nbetween 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withItems':: d.fn(help='"Items is a list of DownwardAPIVolume file"', args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { downwardAPI+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help='"Items is a list of DownwardAPIVolume file"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { downwardAPI+: { items+: if std.isArray(v=items) then items else [items] } }, + }, + '#secret':: d.obj(help='"secret information about the secret data to project"'), + secret: { + '#items':: d.obj(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\""), + items: { + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to.\\nMay not be an absolute path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secret+: { name: name } }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secret+: { optional: optional } }, + }, + '#serviceAccountToken':: d.obj(help='"serviceAccountToken is information about the serviceAccountToken data to project"'), + serviceAccountToken: { + '#withAudience':: d.fn(help='"audience is the intended audience of the token. A recipient of a token\\nmust identify itself with an identifier specified in the audience of the\\ntoken, and otherwise should reject the token. The audience defaults to the\\nidentifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]), + withAudience(audience): { serviceAccountToken+: { audience: audience } }, + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the service\\naccount token. As the token approaches expiration, the kubelet volume\\nplugin will proactively rotate the service account token. The kubelet will\\nstart trying to rotate the token if the token is older than 80 percent of\\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\\nand must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + withExpirationSeconds(expirationSeconds): { serviceAccountToken+: { expirationSeconds: expirationSeconds } }, + '#withPath':: d.fn(help='"path is the path relative to the mount point of the file to project the\\ntoken into."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { serviceAccountToken+: { path: path } }, + }, + }, + '#withDefaultMode':: d.fn(help='"defaultMode are the mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { projected+: { defaultMode: defaultMode } }, + '#withSources':: d.fn(help='"sources is the list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), + withSources(sources): { projected+: { sources: if std.isArray(v=sources) then sources else [sources] } }, + '#withSourcesMixin':: d.fn(help='"sources is the list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), + withSourcesMixin(sources): { projected+: { sources+: if std.isArray(v=sources) then sources else [sources] } }, + }, + '#quobyte':: d.obj(help="\"quobyte represents a Quobyte mount on the host that shares a pod's lifetime\""), + quobyte: { + '#withGroup':: d.fn(help='"group to map volume access to\\nDefault is no group"', 
args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { quobyte+: { group: group } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions.\\nDefaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { quobyte+: { readOnly: readOnly } }, + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services\\nspecified as a string as host:port pair (multiple entries are separated with commas)\\nwhich acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + withRegistry(registry): { quobyte+: { registry: registry } }, + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend\\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + withTenant(tenant): { quobyte+: { tenant: tenant } }, + '#withUser':: d.fn(help='"user to map volume access to\\nDefaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { quobyte+: { user: user } }, + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + withVolume(volume): { quobyte+: { volume: volume } }, + }, + '#rbd':: d.obj(help="\"rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md\""), + rbd: { + '#secretRef':: d.obj(help='"secretRef is name of the authentication secret for RBDUser. If provided\\noverrides keyring.\\nDefault is nil.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { rbd+: { secretRef+: { name: name } } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\\nTODO: how do we prevent errors in the filesystem from compromising the machine"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { rbd+: { fsType: fsType } }, + '#withImage':: d.fn(help='"image is the rados image name.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { rbd+: { image: image } }, + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser.\\nDefault is /etc/ceph/keyring.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + withKeyring(keyring): { rbd+: { keyring: keyring } }, + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, + '#withPool':: d.fn(help='"pool is the rados pool name.\\nDefault is rbd.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + withPool(pool): { rbd+: { pool: pool } }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { rbd+: { readOnly: readOnly } }, + '#withUser':: d.fn(help='"user is the rados user name.\\nDefault is admin.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { rbd+: { user: user } }, + }, + '#scaleIO':: d.obj(help='"scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes."'), + scaleIO: { + '#secretRef':: d.obj(help='"secretRef references to the secret for ScaleIO user and other\\nsensitive information. If this is not provided, Login operation will fail."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { scaleIO+: { secretRef+: { name: name } } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\".\\nDefault is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { scaleIO+: { fsType: fsType } }, + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + withGateway(gateway): { scaleIO+: { gateway: gateway } }, + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + withProtectionDomain(protectionDomain): { scaleIO+: { protectionDomain: protectionDomain } }, + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { scaleIO+: { readOnly: readOnly } }, + '#withSslEnabled':: d.fn(help='"sslEnabled Flag enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + withSslEnabled(sslEnabled): { scaleIO+: { sslEnabled: sslEnabled } }, + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\\nDefault is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + withStorageMode(storageMode): { scaleIO+: { storageMode: storageMode } }, + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + withStoragePool(storagePool): { scaleIO+: { storagePool: storagePool } }, + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + withSystem(system): { scaleIO+: { system: system } }, + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system\\nthat is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { scaleIO+: { volumeName: volumeName } }, + }, + '#secret':: d.obj(help='"secret represents a secret that should populate this volume.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret"'), + secret: { + '#items':: d.obj(help="\"items If unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\""), + items: { + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to.\\nMay not be an absolute path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + }, + '#withDefaultMode':: d.fn(help='"defaultMode is Optional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values\\nfor mode bits. Defaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { secret+: { defaultMode: defaultMode } }, + '#withItems':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secret+: { optional: optional } }, + '#withSecretName':: d.fn(help="\"secretName is the name of the secret in the pod's namespace to use.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secret+: { secretName: secretName } }, + }, + '#storageos':: d.obj(help='"storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."'), + storageos: { + '#secretRef':: d.obj(help='"secretRef specifies the secret to use for obtaining the StorageOS API\\ncredentials. If not specified, default values will be attempted."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { storageos+: { secretRef+: { name: name } } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { storageos+: { fsType: fsType } }, + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { storageos+: { readOnly: readOnly } }, + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume\\nnames are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { storageos+: { volumeName: volumeName } }, + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no\\nnamespace is specified then the Pod's namespace will be used. This allows the\\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\\nSet VolumeName to any name to override the default behaviour.\\nSet to \\\"default\\\" if you are not using namespaces within StorageOS.\\nNamespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + withVolumeNamespace(volumeNamespace): { storageos+: { volumeNamespace: volumeNamespace } }, + }, + '#vsphereVolume':: d.obj(help='"vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine"'), + vsphereVolume: { + '#withFsType':: d.fn(help='"fsType is filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { vsphereVolume+: { fsType: fsType } }, + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + withStoragePolicyID(storagePolicyID): { vsphereVolume+: { storagePolicyID: storagePolicyID } }, + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + withStoragePolicyName(storagePolicyName): { vsphereVolume+: { storagePolicyName: storagePolicyName } }, + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + withVolumePath(volumePath): { vsphereVolume+: { volumePath: volumePath } }, + }, + '#withName':: d.fn(help='"name of the volume.\\nMust be a DNS_LABEL and unique within the pod.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to\\nStartTime before the system will actively try to mark it failed and kill associated containers.\\nValue must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), + withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } } } }, + '#withAutomountServiceAccountToken':: d.fn(help='"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted."', args=[d.arg(name='automountServiceAccountToken', type=d.T.boolean)]), + withAutomountServiceAccountToken(automountServiceAccountToken): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { automountServiceAccountToken: automountServiceAccountToken } } } } } }, + '#withContainers':: d.fn(help='"List of containers belonging to the pod.\\nContainers cannot currently be added or removed.\\nThere must be at least one container in a Pod.\\nCannot be updated."', args=[d.arg(name='containers', type=d.T.array)]), + withContainers(containers): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { containers: if std.isArray(v=containers) then containers else [containers] } } } } } }, + '#withContainersMixin':: d.fn(help='"List of containers belonging to the pod.\\nContainers cannot currently be added or removed.\\nThere must be at least one container in a Pod.\\nCannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containers', type=d.T.array)]), + withContainersMixin(containers): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { containers+: if std.isArray(v=containers) then containers else [containers] } } } } } }, + '#withDnsPolicy':: d.fn(help="\"Set DNS policy for the pod.\\nDefaults to \\\"ClusterFirst\\\".\\nValid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.\\nDNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.\\nTo have DNS options set along with hostNetwork, you have to specify DNS policy\\nexplicitly to 'ClusterFirstWithHostNet'.\"", 
args=[d.arg(name='dnsPolicy', type=d.T.string)]), + withDnsPolicy(dnsPolicy): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } } } }, + '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's\\nenvironment variables, matching the syntax of Docker links.\\nOptional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), + withEnableServiceLinks(enableServiceLinks): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } } } }, + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\\npod to perform user-initiated actions such as debugging. This list cannot be specified when\\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + withEphemeralContainers(ephemeralContainers): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\\npod to perform user-initiated actions such as debugging. This list cannot be specified when\\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + withEphemeralContainersMixin(ephemeralContainers): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, + '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts\\nfile if specified.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), + withHostAliases(hostAliases): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } } }, + '#withHostAliasesMixin':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts\\nfile if specified.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='hostAliases', type=d.T.array)]), + withHostAliasesMixin(hostAliases): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { hostAliases+: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } } }, + '#withHostIPC':: d.fn(help="\"Use the host's ipc namespace.\\nOptional: Default to false.\"", args=[d.arg(name='hostIPC', type=d.T.boolean)]), + withHostIPC(hostIPC): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { hostIPC: hostIPC } } } } } }, + '#withHostNetwork':: d.fn(help="\"Host networking requested for this pod. 
Use the host's network namespace.\\nIf this option is set, the ports that will be used must be specified.\\nDefault to false.\"", args=[d.arg(name='hostNetwork', type=d.T.boolean)]), + withHostNetwork(hostNetwork): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } } } }, + '#withHostPID':: d.fn(help="\"Use the host's pid namespace.\\nOptional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), + withHostPID(hostPID): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { hostPID: hostPID } } } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace.\\nOptional: Default to true.\\nIf set to true or not present, the pod will be run in the host user namespace, useful\\nfor when the pod needs a feature only available to the host user namespace, such as\\nloading a kernel module with CAP_SYS_MODULE.\\nWhen set to false, a new userns is created for the pod. Setting false is useful for\\nmitigating container breakout vulnerabilities even allowing users to run their\\ncontainers as root without actually having root privileges on the host.\\nThis field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { hostUsers: hostUsers } } } } } }, + '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod\\nIf not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), + withHostname(hostname): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { hostname: hostname } } } } } }, + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\\nIf specified, these secrets will be passed to individual puller implementations for them to use.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + withImagePullSecrets(imagePullSecrets): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\\nIf specified, these secrets will be passed to individual puller implementations for them to use.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + withImagePullSecretsMixin(imagePullSecrets): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, + '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod.\\nInit containers are executed in order prior to containers being started. If any\\ninit container fails, the pod is considered to have failed and is handled according\\nto its restartPolicy. 
The name for an init container or normal container must be\\nunique among all containers.\\nInit containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.\\nThe resourceRequirements of an init container are taken into account during scheduling\\nby finding the highest request/limit for each resource type, and then using the max of\\nof that value or the sum of the normal containers. Limits are applied to init containers\\nin a similar fashion.\\nInit containers cannot currently be added or removed.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), + withInitContainers(initContainers): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } } }, + '#withInitContainersMixin':: d.fn(help='"List of initialization containers belonging to the pod.\\nInit containers are executed in order prior to containers being started. If any\\ninit container fails, the pod is considered to have failed and is handled according\\nto its restartPolicy. The name for an init container or normal container must be\\nunique among all containers.\\nInit containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.\\nThe resourceRequirements of an init container are taken into account during scheduling\\nby finding the highest request/limit for each resource type, and then using the max of\\nof that value or the sum of the normal containers. Limits are applied to init containers\\nin a similar fashion.\\nInit containers cannot currently be added or removed.\\nCannot be updated.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='initContainers', type=d.T.array)]), + withInitContainersMixin(initContainers): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { initContainers+: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } } }, + '#withNodeName':: d.fn(help='"NodeName is a request to schedule this pod onto a specific node. 
If it is non-empty,\\nthe scheduler simply schedules this pod onto that node, assuming that it fits resource\\nrequirements."', args=[d.arg(name='nodeName', type=d.T.string)]), + withNodeName(nodeName): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { nodeName: nodeName } } } } } }, + '#withNodeSelector':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node.\\nSelector which must match a node's labels for the pod to be scheduled on that node.\\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), + withNodeSelector(nodeSelector): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } } } }, + '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node.\\nSelector which must match a node's labels for the pod to be scheduled on that node.\\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), + withNodeSelectorMixin(nodeSelector): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } } } }, + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.\\nThis field will be autopopulated at admission time by the RuntimeClass admission controller. If\\nthe RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.\\nThe RuntimeClass admission controller will reject Pod create requests which have the overhead already\\nset. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value\\ndefined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.\\nMore info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), + withOverhead(overhead): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { overhead: overhead } } } } } }, + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.\\nThis field will be autopopulated at admission time by the RuntimeClass admission controller. If\\nthe RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.\\nThe RuntimeClass admission controller will reject Pod create requests which have the overhead already\\nset. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value\\ndefined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.\\nMore info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + withOverheadMixin(overhead): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { overhead+: overhead } } } } } }, + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority.\\nOne of Never, PreemptLowerPriority.\\nDefaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + withPreemptionPolicy(preemptionPolicy): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } } } }, + '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the\\npriority of the pod. When Priority Admission Controller is enabled, it\\nprevents users from setting this field. The admission controller populates\\nthis field from PriorityClassName.\\nThe higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), + withPriority(priority): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { priority: priority } } } } } }, + '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and\\n\\\"system-cluster-critical\\\" are two special keywords which indicate the\\nhighest priorities with the former being the highest priority. Any other\\nname must be defined by creating a PriorityClass object with that name.\\nIf not specified, the pod priority will be default or zero if there is no\\ndefault.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), + withPriorityClassName(priorityClassName): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } } } }, + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness.\\nA pod is ready when all its containers are ready AND\\nall conditions specified in the readiness gates have status equal to \\"True\\"\\nMore info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), + withReadinessGates(readinessGates): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness.\\nA pod is ready when all its containers are ready AND\\nall conditions specified in the readiness gates have status equal to \\"True\\"\\nMore info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + withReadinessGatesMixin(readinessGates): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated\\nand reserved before the Pod is allowed to start. 
The resources\\nwill be made available to those containers which consume them\\nby name.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated\\nand reserved before the Pod is allowed to start. The resources\\nwill be made available to those containers which consume them\\nby name.\\n\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod.\\nOne of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.\\nDefault to Always.\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } } } }, + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\\nto run this pod. 
If no RuntimeClass resource matches the named class, the pod will not be run.\\nIf unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an\\nempty definition that uses the default runtime handler.\\nMore info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + withRuntimeClassName(runtimeClassName): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } } } }, + '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler.\\nIf not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), + withSchedulerName(schedulerName): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { schedulerName: schedulerName } } } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\\nIf schedulingGates is not empty, the pod will stay in the SchedulingGated state and the\\nscheduler will not attempt to schedule the pod.\\n\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\\nIf schedulingGates is not empty, the pod will stay in the SchedulingGated state and the\\nscheduler will not attempt to schedule the pod.\\n\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } } }, + '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.\\nDeprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), + withServiceAccount(serviceAccount): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } } } }, + '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod.\\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), + withServiceAccountName(serviceAccountName): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { serviceAccountName: serviceAccountName } } } } } }, + '#withSetHostnameAsFQDN':: d.fn(help="\"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).\\nIn Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).\\nIn Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\\\\\SYSTEM\\\\\\\\CurrentControlSet\\\\\\\\Services\\\\\\\\Tcpip\\\\\\\\Parameters to FQDN.\\nIf a pod does 
not have FQDN, this has no effect.\\nDefault to false.\"", args=[d.arg(name='setHostnameAsFQDN', type=d.T.boolean)]), + withSetHostnameAsFQDN(setHostnameAsFQDN): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { setHostnameAsFQDN: setHostnameAsFQDN } } } } } }, + '#withShareProcessNamespace':: d.fn(help='"Share a single process namespace between all of the containers in a pod.\\nWhen this is set containers will be able to view and signal processes from other containers\\nin the same pod, and the first process in each container will not be assigned PID 1.\\nHostPID and ShareProcessNamespace cannot both be set.\\nOptional: Default to false."', args=[d.arg(name='shareProcessNamespace', type=d.T.boolean)]), + withShareProcessNamespace(shareProcessNamespace): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { shareProcessNamespace: shareProcessNamespace } } } } } }, + '#withSubdomain':: d.fn(help='"If specified, the fully qualified Pod hostname will be \\"...svc.\\".\\nIf not specified, the pod will not have a domainname at all."', args=[d.arg(name='subdomain', type=d.T.string)]), + withSubdomain(subdomain): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { subdomain: subdomain } } } } } }, + '#withTerminationGracePeriodSeconds':: d.fn(help='"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.\\nValue must be non-negative integer. The value zero indicates stop immediately via\\nthe kill signal (no opportunity to shut down).\\nIf this value is nil, the default grace period will be used instead.\\nThe grace period is the duration in seconds after the processes running in the pod are sent\\na termination signal and the time when the processes are forcibly halted with a kill signal.\\nSet this value longer than the expected cleanup time for your process.\\nDefaults to 30 seconds."', args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } } } } } }, + '#withTolerations':: d.fn(help="\"If specified, the pod's tolerations.\"", args=[d.arg(name='tolerations', type=d.T.array)]), + withTolerations(tolerations): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } } }, + '#withTolerationsMixin':: d.fn(help="\"If specified, the pod's tolerations.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tolerations', type=d.T.array)]), + withTolerationsMixin(tolerations): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } } }, + '#withTopologySpreadConstraints':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology\\ndomains. 
Scheduler will schedule pods in a way which abides by the constraints.\\nAll topologySpreadConstraints are ANDed."', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), + withTopologySpreadConstraints(topologySpreadConstraints): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } } }, + '#withTopologySpreadConstraintsMixin':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology\\ndomains. Scheduler will schedule pods in a way which abides by the constraints.\\nAll topologySpreadConstraints are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), + withTopologySpreadConstraintsMixin(topologySpreadConstraints): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints+: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } } }, + '#withVolumes':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes"', args=[d.arg(name='volumes', type=d.T.array)]), + withVolumes(volumes): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { volumes: if std.isArray(v=volumes) then volumes else [volumes] } } } } } }, + '#withVolumesMixin':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumes', type=d.T.array)]), + withVolumesMixin(volumes): { spec+: { deploymentTemplate+: { spec+: { template+: { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } } } } } }, + }, + }, + '#withMinReadySeconds':: d.fn(help='"Minimum number of seconds for which a newly created pod should be ready\\nwithout any of its container crashing, for it to be considered available.\\nDefaults to 0 (pod will be considered available as soon as it is ready)"', args=[d.arg(name='minReadySeconds', type=d.T.integer)]), + withMinReadySeconds(minReadySeconds): { spec+: { deploymentTemplate+: { spec+: { minReadySeconds: minReadySeconds } } } }, + '#withPaused':: d.fn(help='"Indicates that the deployment is paused."', args=[d.arg(name='paused', type=d.T.boolean)]), + withPaused(paused): { spec+: { deploymentTemplate+: { spec+: { paused: paused } } } }, + '#withProgressDeadlineSeconds':: d.fn(help='"The maximum time in seconds for a deployment to make progress before it\\nis considered to be failed. The deployment controller will continue to\\nprocess failed deployments and a condition with a ProgressDeadlineExceeded\\nreason will be surfaced in the deployment status. Note that progress will\\nnot be estimated during the time a deployment is paused. Defaults to 600s."', args=[d.arg(name='progressDeadlineSeconds', type=d.T.integer)]), + withProgressDeadlineSeconds(progressDeadlineSeconds): { spec+: { deploymentTemplate+: { spec+: { progressDeadlineSeconds: progressDeadlineSeconds } } } }, + '#withReplicas':: d.fn(help='"Number of desired pods. This is a pointer to distinguish between explicit\\nzero and not specified. 
Defaults to 1."', args=[d.arg(name='replicas', type=d.T.integer)]), + withReplicas(replicas): { spec+: { deploymentTemplate+: { spec+: { replicas: replicas } } } }, + '#withRevisionHistoryLimit':: d.fn(help='"The number of old ReplicaSets to retain to allow rollback.\\nThis is a pointer to distinguish between explicit zero and not specified.\\nDefaults to 10."', args=[d.arg(name='revisionHistoryLimit', type=d.T.integer)]), + withRevisionHistoryLimit(revisionHistoryLimit): { spec+: { deploymentTemplate+: { spec+: { revisionHistoryLimit: revisionHistoryLimit } } } }, + }, + }, + '#serviceAccountTemplate':: d.obj(help='"ServiceAccountTemplate is the template for the ServiceAccount object."'), + serviceAccountTemplate: { + '#metadata':: d.obj(help='"Metadata contains the configurable metadata fields for the ServiceAccount."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that\\nmay be set by external tools to store and retrieve arbitrary metadata.\\nThey are not queryable and should be preserved when modifying objects.\\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { spec+: { serviceAccountTemplate+: { metadata+: { annotations: annotations } } } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that\\nmay be set by external tools to store and retrieve arbitrary metadata.\\nThey are not queryable and should be preserved when modifying objects.\\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { spec+: { serviceAccountTemplate+: { metadata+: { annotations+: annotations } } } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. Labels will be merged with internal labels\\nused by crossplane, and labels with a crossplane.io key might be\\noverwritten.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { spec+: { serviceAccountTemplate+: { metadata+: { labels: labels } } } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. 
Labels will be merged with internal labels\\nused by crossplane, and labels with a crossplane.io key might be\\noverwritten.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { spec+: { serviceAccountTemplate+: { metadata+: { labels+: labels } } } }, + '#withName':: d.fn(help='"Name is the name of the object."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { serviceAccountTemplate+: { metadata+: { name: name } } } }, + }, + }, + '#serviceTemplate':: d.obj(help='"ServiceTemplate is the template for the Service object."'), + serviceTemplate: { + '#metadata':: d.obj(help='"Metadata contains the configurable metadata fields for the Service."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that\\nmay be set by external tools to store and retrieve arbitrary metadata.\\nThey are not queryable and should be preserved when modifying objects.\\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { spec+: { serviceTemplate+: { metadata+: { annotations: annotations } } } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that\\nmay be set by external tools to store and retrieve arbitrary metadata.\\nThey are not queryable and should be preserved when modifying objects.\\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { spec+: { serviceTemplate+: { metadata+: { annotations+: annotations } } } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. Labels will be merged with internal labels\\nused by crossplane, and labels with a crossplane.io key might be\\noverwritten.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { spec+: { serviceTemplate+: { metadata+: { labels: labels } } } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. 
Labels will be merged with internal labels\\nused by crossplane, and labels with a crossplane.io key might be\\noverwritten.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { spec+: { serviceTemplate+: { metadata+: { labels+: labels } } } }, + '#withName':: d.fn(help='"Name is the name of the object."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { serviceTemplate+: { metadata+: { name: name } } } }, + }, + }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1beta1/function.libsonnet b/crossplane/1.17/_gen/pkg/v1beta1/function.libsonnet new file mode 100644 index 0000000..c8ef62a --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1beta1/function.libsonnet @@ -0,0 +1,96 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='function', url='', help='"A Function installs an OCI compatible Crossplane package, extending\\nCrossplane with support for a new kind of composition function.\\n\\n\\nRead the Crossplane documentation for\\n[more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions)."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Function', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1beta1', + kind: 'Function', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"FunctionSpec specifies the configuration of a Function."'), + spec: { + '#controllerConfigRef':: d.obj(help='"ControllerConfigRef references a ControllerConfig resource that will be\\nused to configure the packaged controller Deployment.\\nDeprecated: Use RuntimeConfigReference instead."'), + controllerConfigRef: { + '#withName':: d.fn(help='"Name of the ControllerConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { controllerConfigRef+: { name: name } } }, + }, + '#packagePullSecrets':: d.obj(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."'), + packagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. 
apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#runtimeConfigRef':: d.obj(help='"RuntimeConfigRef references a RuntimeConfig resource that will be used\\nto configure the package runtime."'), + runtimeConfigRef: { + '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { runtimeConfigRef+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the referent."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { runtimeConfigRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name of the RuntimeConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { runtimeConfigRef+: { name: name } } }, + }, + '#withCommonLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabels(commonLabels): { spec+: { commonLabels: commonLabels } }, + '#withCommonLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabelsMixin(commonLabels): { spec+: { commonLabels+: commonLabels } }, + '#withIgnoreCrossplaneConstraints':: d.fn(help='"IgnoreCrossplaneConstraints indicates to the package manager whether to\\nhonor Crossplane version constrains specified by the package.\\nDefault is false."', args=[d.arg(name='ignoreCrossplaneConstraints', type=d.T.boolean)]), + withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints): { spec+: { ignoreCrossplaneConstraints: ignoreCrossplaneConstraints } }, + '#withPackage':: d.fn(help='"Package is the name of the package that is being requested."', args=[d.arg(name='package', type=d.T.string)]), + withPackage(package): { spec+: { package: package } }, + '#withPackagePullPolicy':: d.fn(help='"PackagePullPolicy defines the pull policy for the package.\\nDefault is IfNotPresent."', args=[d.arg(name='packagePullPolicy', type=d.T.string)]), + withPackagePullPolicy(packagePullPolicy): { spec+: { packagePullPolicy: packagePullPolicy } }, + '#withPackagePullSecrets':: d.fn(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."', args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecrets(packagePullSecrets): { spec+: { packagePullSecrets: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withPackagePullSecretsMixin':: d.fn(help='"PackagePullSecrets are named secrets in the same namespace that can be used\\nto fetch packages from private registries."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecretsMixin(packagePullSecrets): { spec+: { packagePullSecrets+: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withRevisionActivationPolicy':: 
d.fn(help='"RevisionActivationPolicy specifies how the package controller should\\nupdate from one revision to the next. Options are Automatic or Manual.\\nDefault is Automatic."', args=[d.arg(name='revisionActivationPolicy', type=d.T.string)]), + withRevisionActivationPolicy(revisionActivationPolicy): { spec+: { revisionActivationPolicy: revisionActivationPolicy } }, + '#withRevisionHistoryLimit':: d.fn(help='"RevisionHistoryLimit dictates how the package controller cleans up old\\ninactive package revisions.\\nDefaults to 1. Can be disabled by explicitly setting to 0."', args=[d.arg(name='revisionHistoryLimit', type=d.T.integer)]), + withRevisionHistoryLimit(revisionHistoryLimit): { spec+: { revisionHistoryLimit: revisionHistoryLimit } }, + '#withSkipDependencyResolution':: d.fn(help='"SkipDependencyResolution indicates to the package manager whether to skip\\nresolving dependencies for a package. Setting this value to true may have\\nunintended consequences.\\nDefault is false."', args=[d.arg(name='skipDependencyResolution', type=d.T.boolean)]), + withSkipDependencyResolution(skipDependencyResolution): { spec+: { skipDependencyResolution: skipDependencyResolution } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1beta1/functionRevision.libsonnet b/crossplane/1.17/_gen/pkg/v1beta1/functionRevision.libsonnet new file mode 100644 index 0000000..a22c150 --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1beta1/functionRevision.libsonnet @@ -0,0 +1,100 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='functionRevision', url='', help="\"A FunctionRevision represents a revision of a Function. Crossplane\\ncreates new revisions when there are changes to the Function.\\n\\n\\nCrossplane creates and manages FunctionRevisions. Don't directly edit\\nFunctionRevisions.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of FunctionRevision', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1beta1', + kind: 'FunctionRevision', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#spec':: d.obj(help='"FunctionRevisionSpec specifies configuration for a FunctionRevision."'), + spec: { + '#controllerConfigRef':: d.obj(help='"ControllerConfigRef references a ControllerConfig resource that will be\\nused to configure the packaged controller Deployment.\\nDeprecated: Use RuntimeConfigReference instead."'), + controllerConfigRef: { + '#withName':: d.fn(help='"Name of the ControllerConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { controllerConfigRef+: { name: name } } }, + }, + '#packagePullSecrets':: d.obj(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. 
They are also applied to\\nany images pulled for the package, such as a provider's controller image.\""), + packagePullSecrets: { + '#withName':: d.fn(help='"Name of the referent.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\\nTODO: Add other useful fields. apiVersion, kind, uid?"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#runtimeConfigRef':: d.obj(help='"RuntimeConfigRef references a RuntimeConfig resource that will be used\\nto configure the package runtime."'), + runtimeConfigRef: { + '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { runtimeConfigRef+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind of the referent."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { runtimeConfigRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name of the RuntimeConfig."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { runtimeConfigRef+: { name: name } } }, + }, + '#withCommonLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabels(commonLabels): { spec+: { commonLabels: commonLabels } }, + '#withCommonLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize\\n(scope and select) objects. May match selectors of replication controllers\\nand services.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='commonLabels', type=d.T.object)]), + withCommonLabelsMixin(commonLabels): { spec+: { commonLabels+: commonLabels } }, + '#withDesiredState':: d.fn(help='"DesiredState of the PackageRevision. Can be either Active or Inactive."', args=[d.arg(name='desiredState', type=d.T.string)]), + withDesiredState(desiredState): { spec+: { desiredState: desiredState } }, + '#withIgnoreCrossplaneConstraints':: d.fn(help='"IgnoreCrossplaneConstraints indicates to the package manager whether to\\nhonor Crossplane version constrains specified by the package.\\nDefault is false."', args=[d.arg(name='ignoreCrossplaneConstraints', type=d.T.boolean)]), + withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints): { spec+: { ignoreCrossplaneConstraints: ignoreCrossplaneConstraints } }, + '#withImage':: d.fn(help='"Package image used by install Pod to extract package contents."', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { spec+: { image: image } }, + '#withPackagePullPolicy':: d.fn(help="\"PackagePullPolicy defines the pull policy for the package. It is also\\napplied to any images pulled for the package, such as a provider's\\ncontroller image.\\nDefault is IfNotPresent.\"", args=[d.arg(name='packagePullPolicy', type=d.T.string)]), + withPackagePullPolicy(packagePullPolicy): { spec+: { packagePullPolicy: packagePullPolicy } }, + '#withPackagePullSecrets':: d.fn(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. 
They are also applied to\\nany images pulled for the package, such as a provider's controller image.\"", args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecrets(packagePullSecrets): { spec+: { packagePullSecrets: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withPackagePullSecretsMixin':: d.fn(help="\"PackagePullSecrets are named secrets in the same namespace that can be\\nused to fetch packages from private registries. They are also applied to\\nany images pulled for the package, such as a provider's controller image.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='packagePullSecrets', type=d.T.array)]), + withPackagePullSecretsMixin(packagePullSecrets): { spec+: { packagePullSecrets+: if std.isArray(v=packagePullSecrets) then packagePullSecrets else [packagePullSecrets] } }, + '#withRevision':: d.fn(help="\"Revision number. Indicates when the revision will be garbage collected\\nbased on the parent's RevisionHistoryLimit.\"", args=[d.arg(name='revision', type=d.T.integer)]), + withRevision(revision): { spec+: { revision: revision } }, + '#withSkipDependencyResolution':: d.fn(help='"SkipDependencyResolution indicates to the package manager whether to skip\\nresolving dependencies for a package. Setting this value to true may have\\nunintended consequences.\\nDefault is false."', args=[d.arg(name='skipDependencyResolution', type=d.T.boolean)]), + withSkipDependencyResolution(skipDependencyResolution): { spec+: { skipDependencyResolution: skipDependencyResolution } }, + '#withTlsClientSecretName':: d.fn(help='"TLSClientSecretName is the name of the TLS Secret that stores client\\ncertificates of the Provider."', args=[d.arg(name='tlsClientSecretName', type=d.T.string)]), + withTlsClientSecretName(tlsClientSecretName): { spec+: { tlsClientSecretName: tlsClientSecretName } }, + '#withTlsServerSecretName':: d.fn(help='"TLSServerSecretName is the name of the TLS Secret that stores server\\ncertificates of the Provider."', args=[d.arg(name='tlsServerSecretName', type=d.T.string)]), + withTlsServerSecretName(tlsServerSecretName): { spec+: { tlsServerSecretName: tlsServerSecretName } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1beta1/lock.libsonnet b/crossplane/1.17/_gen/pkg/v1beta1/lock.libsonnet new file mode 100644 index 0000000..b6e5e36 --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1beta1/lock.libsonnet @@ -0,0 +1,82 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='lock', url='', help='"Lock is the CRD type that tracks package dependencies."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Lock', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pkg.crossplane.io/v1beta1', + kind: 'Lock', + } + self.metadata.withName(name=name) + self.metadata.withAnnotations(annotations={ + 'tanka.dev/namespaced': 'false', + }), + '#packages':: d.obj(help=''), + packages: { + '#dependencies':: d.obj(help='"Dependencies are the list of dependencies of this package. The order of\\nthe dependencies will dictate the order in which they are resolved."'), + dependencies: { + '#withConstraints':: d.fn(help='"Constraints is a valid semver range, which will be used to select a valid\\ndependency version."', args=[d.arg(name='constraints', type=d.T.string)]), + withConstraints(constraints): { constraints: constraints }, + '#withPackage':: d.fn(help='"Package is the OCI image name without a tag or digest."', args=[d.arg(name='package', type=d.T.string)]), + withPackage(package): { package: package }, + '#withType':: d.fn(help='"Type is the type of package. Can be either Configuration or Provider."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withDependencies':: d.fn(help='"Dependencies are the list of dependencies of this package. The order of\\nthe dependencies will dictate the order in which they are resolved."', args=[d.arg(name='dependencies', type=d.T.array)]), + withDependencies(dependencies): { dependencies: if std.isArray(v=dependencies) then dependencies else [dependencies] }, + '#withDependenciesMixin':: d.fn(help='"Dependencies are the list of dependencies of this package. The order of\\nthe dependencies will dictate the order in which they are resolved."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='dependencies', type=d.T.array)]), + withDependenciesMixin(dependencies): { dependencies+: if std.isArray(v=dependencies) then dependencies else [dependencies] }, + '#withName':: d.fn(help='"Name corresponds to the name of the package revision for this package."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withSource':: d.fn(help='"Source is the OCI image name without a tag or digest."', args=[d.arg(name='source', type=d.T.string)]), + withSource(source): { source: source }, + '#withType':: d.fn(help='"Type is the type of package. 
Can be either Configuration or Provider."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withVersion':: d.fn(help='"Version is the tag or digest of the OCI image."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { version: version }, + }, + '#withPackages':: d.fn(help='', args=[d.arg(name='packages', type=d.T.array)]), + withPackages(packages): { packages: if std.isArray(v=packages) then packages else [packages] }, + '#withPackagesMixin':: d.fn(help='\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='packages', type=d.T.array)]), + withPackagesMixin(packages): { packages+: if std.isArray(v=packages) then packages else [packages] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/crossplane/1.17/_gen/pkg/v1beta1/main.libsonnet b/crossplane/1.17/_gen/pkg/v1beta1/main.libsonnet new file mode 100644 index 0000000..8e3d726 --- /dev/null +++ b/crossplane/1.17/_gen/pkg/v1beta1/main.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta1', url='', help=''), + deploymentRuntimeConfig: (import 'deploymentRuntimeConfig.libsonnet'), + 'function': (import 'function.libsonnet'), + functionRevision: (import 'functionRevision.libsonnet'), + lock: (import 'lock.libsonnet'), +} diff --git a/crossplane/1.17/gen.libsonnet b/crossplane/1.17/gen.libsonnet new file mode 100644 index 0000000..91ee733 --- /dev/null +++ b/crossplane/1.17/gen.libsonnet @@ -0,0 +1,7 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='crossplane', url='github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17/main.libsonnet', help=''), + apiextensions:: (import '_gen/apiextensions/main.libsonnet'), + meta:: (import '_gen/meta/main.libsonnet'), + pkg:: (import '_gen/pkg/main.libsonnet'), +} diff --git a/crossplane/1.17/main.libsonnet b/crossplane/1.17/main.libsonnet new file mode 100644 index 0000000..5b8aeaa --- /dev/null +++ b/crossplane/1.17/main.libsonnet @@ -0,0 +1 @@ +(import 'gen.libsonnet') + (import '_custom/compositeResourceDefinition.libsonnet') + (import '_custom/composition.libsonnet') + (import '_custom/resource.libsonnet') diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..0a625f5 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,7 @@ +# crossplane-core Jsonnet library + +This library is generated with [`k8s`](https://github.com/jsonnet-libs/k8s). 
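+
+To consume a version of this library, vendor it with [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler) and import its `main.libsonnet`. A minimal, hypothetical sketch (the `@main` ref and the install path below assume the repository's default branch and the directory layout listed underneath):
+
+```jsonnet
+// Hypothetically vendored with:
+//   jb install github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17@main
+local crossplane = import 'github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17/main.libsonnet';
+
+// Renders a bare Composition object named 'example'.
+crossplane.apiextensions.v1.composition.new('example')
+```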
+ +- [crossplane/1.17](crossplane/1.17/README.md) +- [function-patch-and-transform/0.7](function-patch-and-transform/0.7/README.md) +- [function-cel-filter/0.1](function-cel-filter/0.1/README.md) diff --git a/docs/crossplane/1.17/README.md b/docs/crossplane/1.17/README.md new file mode 100644 index 0000000..3ab0ece --- /dev/null +++ b/docs/crossplane/1.17/README.md @@ -0,0 +1,16 @@ +--- +permalink: /crossplane/1.17/ +--- + +# crossplane + +```jsonnet +local crossplane = import "github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17/main.libsonnet" +``` + + + +* [apiextensions](apiextensions/index.md) +* [meta](meta/index.md) +* [pkg](pkg/index.md) +* [util](util/index.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/index.md b/docs/crossplane/1.17/apiextensions/index.md new file mode 100644 index 0000000..4005859 --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/index.md @@ -0,0 +1,11 @@ +--- +permalink: /crossplane/1.17/apiextensions/ +--- + +# apiextensions + + + +* [v1](v1/index.md) +* [v1alpha1](v1alpha1/index.md) +* [v1beta1](v1beta1/index.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/v1/compositeResourceDefinition.md b/docs/crossplane/1.17/apiextensions/v1/compositeResourceDefinition.md new file mode 100644 index 0000000..0191e21 --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/v1/compositeResourceDefinition.md @@ -0,0 +1,816 @@ +--- +permalink: /crossplane/1.17/apiextensions/v1/compositeResourceDefinition/ +--- + +# apiextensions.v1.compositeResourceDefinition + +"A CompositeResourceDefinition defines the schema for a new custom Kubernetes\nAPI.\n\n\nRead the Crossplane documentation for\n[more information about CustomResourceDefinitions](https://docs.crossplane.io/latest/concepts/composite-resource-definitions)." 
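+
+Before the full index below, a minimal sketch of how the `new` and `withClaimNames` helpers documented on this page combine; the import path follows the library README, and the kind, plural and group values are only examples:
+
+```jsonnet
+local crossplane = import 'github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17/main.libsonnet';
+local xrd = crossplane.apiextensions.v1.compositeResourceDefinition;
+
+// An XR type xpostgresqlinstances.example.org plus a namespaced PostgreSQLInstance claim for it.
+xrd.new(kind='XPostgreSQLInstance', plural='xpostgresqlinstances', group='example.org')
++ xrd.withClaimNames(kind='PostgreSQLInstance', plural='postgresqlinstances')
+```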
+ +## Index + +* [`fn new(kind, plural, group)`](#fn-new) +* [`fn mapVersions(kind, plural)`](#fn-mapversions) +* [`fn withClaimNames(kind, plural)`](#fn-withclaimnames) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withConnectionSecretKeys(connectionSecretKeys)`](#fn-specwithconnectionsecretkeys) + * [`fn withConnectionSecretKeysMixin(connectionSecretKeys)`](#fn-specwithconnectionsecretkeysmixin) + * [`fn withDefaultCompositeDeletePolicy(defaultCompositeDeletePolicy)`](#fn-specwithdefaultcompositedeletepolicy) + * [`fn withDefaultCompositionUpdatePolicy(defaultCompositionUpdatePolicy)`](#fn-specwithdefaultcompositionupdatepolicy) + * [`fn withGroup(group)`](#fn-specwithgroup) + * [`fn withVersions(versions)`](#fn-specwithversions) + * [`fn withVersionsMixin(versions)`](#fn-specwithversionsmixin) + * [`obj spec.claimNames`](#obj-specclaimnames) + * [`fn withCategories(categories)`](#fn-specclaimnameswithcategories) + * [`fn withCategoriesMixin(categories)`](#fn-specclaimnameswithcategoriesmixin) + * [`fn withKind(kind)`](#fn-specclaimnameswithkind) + * [`fn withListKind(listKind)`](#fn-specclaimnameswithlistkind) + * [`fn withPlural(plural)`](#fn-specclaimnameswithplural) + * [`fn withShortNames(shortNames)`](#fn-specclaimnameswithshortnames) + * [`fn withShortNamesMixin(shortNames)`](#fn-specclaimnameswithshortnamesmixin) + * [`fn withSingular(singular)`](#fn-specclaimnameswithsingular) + * [`obj spec.conversion`](#obj-specconversion) + * [`fn withStrategy(strategy)`](#fn-specconversionwithstrategy) + * [`obj spec.conversion.webhook`](#obj-specconversionwebhook) + * [`fn withConversionReviewVersions(conversionReviewVersions)`](#fn-specconversionwebhookwithconversionreviewversions) + * [`fn withConversionReviewVersionsMixin(conversionReviewVersions)`](#fn-specconversionwebhookwithconversionreviewversionsmixin) + * [`obj spec.conversion.webhook.clientConfig`](#obj-specconversionwebhookclientconfig) + * [`fn withCaBundle(caBundle)`](#fn-specconversionwebhookclientconfigwithcabundle) + * [`fn withUrl(url)`](#fn-specconversionwebhookclientconfigwithurl) + * [`obj 
spec.conversion.webhook.clientConfig.service`](#obj-specconversionwebhookclientconfigservice) + * [`fn withName(name)`](#fn-specconversionwebhookclientconfigservicewithname) + * [`fn withNamespace(namespace)`](#fn-specconversionwebhookclientconfigservicewithnamespace) + * [`fn withPath(path)`](#fn-specconversionwebhookclientconfigservicewithpath) + * [`fn withPort(port)`](#fn-specconversionwebhookclientconfigservicewithport) + * [`obj spec.defaultCompositionRef`](#obj-specdefaultcompositionref) + * [`fn withName(name)`](#fn-specdefaultcompositionrefwithname) + * [`obj spec.enforcedCompositionRef`](#obj-specenforcedcompositionref) + * [`fn withName(name)`](#fn-specenforcedcompositionrefwithname) + * [`obj spec.metadata`](#obj-specmetadata) + * [`fn withAnnotations(annotations)`](#fn-specmetadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-specmetadatawithannotationsmixin) + * [`fn withLabels(labels)`](#fn-specmetadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-specmetadatawithlabelsmixin) + * [`obj spec.names`](#obj-specnames) + * [`fn withCategories(categories)`](#fn-specnameswithcategories) + * [`fn withCategoriesMixin(categories)`](#fn-specnameswithcategoriesmixin) + * [`fn withKind(kind)`](#fn-specnameswithkind) + * [`fn withListKind(listKind)`](#fn-specnameswithlistkind) + * [`fn withPlural(plural)`](#fn-specnameswithplural) + * [`fn withShortNames(shortNames)`](#fn-specnameswithshortnames) + * [`fn withShortNamesMixin(shortNames)`](#fn-specnameswithshortnamesmixin) + * [`fn withSingular(singular)`](#fn-specnameswithsingular) + * [`obj spec.versions`](#obj-specversions) + * [`fn withAdditionalPrinterColumns(additionalPrinterColumns)`](#fn-specversionswithadditionalprintercolumns) + * [`fn withAdditionalPrinterColumnsMixin(additionalPrinterColumns)`](#fn-specversionswithadditionalprintercolumnsmixin) + * [`fn withDeprecated(deprecated)`](#fn-specversionswithdeprecated) + * [`fn withDeprecationWarning(deprecationWarning)`](#fn-specversionswithdeprecationwarning) + * [`fn withName(name)`](#fn-specversionswithname) + * [`fn withReferenceable(referenceable)`](#fn-specversionswithreferenceable) + * [`fn withServed(served)`](#fn-specversionswithserved) + * [`obj spec.versions.additionalPrinterColumns`](#obj-specversionsadditionalprintercolumns) + * [`fn withDescription(description)`](#fn-specversionsadditionalprintercolumnswithdescription) + * [`fn withFormat(format)`](#fn-specversionsadditionalprintercolumnswithformat) + * [`fn withJsonPath(jsonPath)`](#fn-specversionsadditionalprintercolumnswithjsonpath) + * [`fn withName(name)`](#fn-specversionsadditionalprintercolumnswithname) + * [`fn withPriority(priority)`](#fn-specversionsadditionalprintercolumnswithpriority) + * [`fn withType(type)`](#fn-specversionsadditionalprintercolumnswithtype) + * [`obj spec.versions.schema`](#obj-specversionsschema) + * [`fn withOpenAPIV3Schema(openAPIV3Schema)`](#fn-specversionsschemawithopenapiv3schema) + * [`fn withOpenAPIV3SchemaMixin(openAPIV3Schema)`](#fn-specversionsschemawithopenapiv3schemamixin) + +## Fields + +### fn new + +```ts +new(kind, plural, group) +``` + +new returns an instance of CompositeResourceDefinition= + +For example: xpostgresqlinstances.example.org + +- `kind`: XPostgreSQLInstance +- `plural`: xpostgresqlinstances +- `group`: example.org + +A common convention is that the XR (composite resource) are prefixed with 'X' +while claim names are not. This lets app team members think of creating a claim +as (e.g.) 'creating a PostgreSQLInstance'. 
Use `withClaimNames` to set this. + + +### fn mapVersions + +```ts +mapVersions(kind, plural) +``` + +Sets the ClaimNames attribute. + +Example: +- `kind`: PostgreSQLInstance +- `plural`: postgresqlinstances + +A common convention is that the XR (composite resource) are prefixed with 'X' +while claim names are not. This lets app team members think of creating a claim +as (e.g.) 'creating a PostgreSQLInstance'. + + +### fn withClaimNames + +```ts +withClaimNames(kind, plural) +``` + +Sets the ClaimNames attribute. + +Example: +- `kind`: PostgreSQLInstance +- `plural`: postgresqlinstances + +A common convention is that the XR (composite resource) are prefixed with 'X' +while claim names are not. This lets app team members think of creating a claim +as (e.g.) 'creating a PostgreSQLInstance'. + + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"CompositeResourceDefinitionSpec specifies the desired state of the definition." + +### fn spec.withConnectionSecretKeys + +```ts +withConnectionSecretKeys(connectionSecretKeys) +``` + +"ConnectionSecretKeys is the list of keys that will be exposed to the end\nuser of the defined kind.\nIf the list is empty, all keys will be published." 
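+
+For example, a self-contained sketch with placeholder key names (`xrd` is bound the same way as in the example near the top of this page):
+
+```jsonnet
+local crossplane = import 'github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17/main.libsonnet';
+local xrd = crossplane.apiextensions.v1.compositeResourceDefinition;
+
+// Expose only these keys from the composed resources' connection details.
+xrd.spec.withConnectionSecretKeys(['username', 'password', 'endpoint'])
+```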
+ +### fn spec.withConnectionSecretKeysMixin + +```ts +withConnectionSecretKeysMixin(connectionSecretKeys) +``` + +"ConnectionSecretKeys is the list of keys that will be exposed to the end\nuser of the defined kind.\nIf the list is empty, all keys will be published." + +**Note:** This function appends passed data to existing values + +### fn spec.withDefaultCompositeDeletePolicy + +```ts +withDefaultCompositeDeletePolicy(defaultCompositeDeletePolicy) +``` + +"DefaultCompositeDeletePolicy is the policy used when deleting the Composite\nthat is associated with the Claim if no policy has been specified." + +### fn spec.withDefaultCompositionUpdatePolicy + +```ts +withDefaultCompositionUpdatePolicy(defaultCompositionUpdatePolicy) +``` + +"DefaultCompositionUpdatePolicy is the policy used when updating composites after a new\nComposition Revision has been created if no policy has been specified on the composite." + +### fn spec.withGroup + +```ts +withGroup(group) +``` + +"Group specifies the API group of the defined composite resource.\nComposite resources are served under `/apis//...`. Must match the\nname of the XRD (in the form `.`)." + +### fn spec.withVersions + +```ts +withVersions(versions) +``` + +"Versions is the list of all API versions of the defined composite\nresource. Version names are used to compute the order in which served\nversions are listed in API discovery. If the version string is\n\"kube-like\", it will sort above non \"kube-like\" version strings, which\nare ordered lexicographically. \"Kube-like\" versions start with a \"v\",\nthen are followed by a number (the major version), then optionally the\nstring \"alpha\" or \"beta\" and another number (the minor version). These\nare sorted first by GA > beta > alpha (where GA is a version with no\nsuffix such as beta or alpha), and then by comparing major version, then\nminor version. An example sorted list of versions: v10, v2, v1, v11beta2,\nv10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10." + +### fn spec.withVersionsMixin + +```ts +withVersionsMixin(versions) +``` + +"Versions is the list of all API versions of the defined composite\nresource. Version names are used to compute the order in which served\nversions are listed in API discovery. If the version string is\n\"kube-like\", it will sort above non \"kube-like\" version strings, which\nare ordered lexicographically. \"Kube-like\" versions start with a \"v\",\nthen are followed by a number (the major version), then optionally the\nstring \"alpha\" or \"beta\" and another number (the minor version). These\nare sorted first by GA > beta > alpha (where GA is a version with no\nsuffix such as beta or alpha), and then by comparing major version, then\nminor version. An example sorted list of versions: v10, v2, v1, v11beta2,\nv10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10." + +**Note:** This function appends passed data to existing values + +## obj spec.claimNames + +"ClaimNames specifies the names of an optional composite resource claim.\nWhen claim names are specified Crossplane will create a namespaced\n'composite resource claim' CRD that corresponds to the defined composite\nresource. This composite resource claim acts as a namespaced proxy for\nthe composite resource; creating, updating, or deleting the claim will\ncreate, update, or delete a corresponding composite resource. You may add\nclaim names to an existing CompositeResourceDefinition, but they cannot\nbe changed or removed once they have been set." 
+ +### fn spec.claimNames.withCategories + +```ts +withCategories(categories) +``` + +"categories is a list of grouped resources this custom resource belongs to (e.g. 'all').\nThis is published in API discovery documents, and used by clients to support invocations like\n`kubectl get all`." + +### fn spec.claimNames.withCategoriesMixin + +```ts +withCategoriesMixin(categories) +``` + +"categories is a list of grouped resources this custom resource belongs to (e.g. 'all').\nThis is published in API discovery documents, and used by clients to support invocations like\n`kubectl get all`." + +**Note:** This function appends passed data to existing values + +### fn spec.claimNames.withKind + +```ts +withKind(kind) +``` + +"kind is the serialized kind of the resource. It is normally CamelCase and singular.\nCustom resource instances will use this value as the `kind` attribute in API calls." + +### fn spec.claimNames.withListKind + +```ts +withListKind(listKind) +``` + +"listKind is the serialized kind of the list for this resource. Defaults to \"`kind`List\"." + +### fn spec.claimNames.withPlural + +```ts +withPlural(plural) +``` + +"plural is the plural name of the resource to serve.\nThe custom resources are served under `/apis///.../`.\nMust match the name of the CustomResourceDefinition (in the form `.`).\nMust be all lowercase." + +### fn spec.claimNames.withShortNames + +```ts +withShortNames(shortNames) +``` + +"shortNames are short names for the resource, exposed in API discovery documents,\nand used by clients to support invocations like `kubectl get `.\nIt must be all lowercase." + +### fn spec.claimNames.withShortNamesMixin + +```ts +withShortNamesMixin(shortNames) +``` + +"shortNames are short names for the resource, exposed in API discovery documents,\nand used by clients to support invocations like `kubectl get `.\nIt must be all lowercase." + +**Note:** This function appends passed data to existing values + +### fn spec.claimNames.withSingular + +```ts +withSingular(singular) +``` + +"singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`." + +## obj spec.conversion + +"Conversion defines all conversion settings for the defined Composite resource." + +### fn spec.conversion.withStrategy + +```ts +withStrategy(strategy) +``` + +"strategy specifies how custom resources are converted between versions. Allowed values are:\n- `\"None\"`: The converter only change the apiVersion and would not touch any other field in the custom resource.\n- `\"Webhook\"`: API Server will call to an external webhook to do the conversion. Additional information\n is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set." + +## obj spec.conversion.webhook + +"webhook describes how to call the conversion webhook. Required when `strategy` is set to `\"Webhook\"`." + +### fn spec.conversion.webhook.withConversionReviewVersions + +```ts +withConversionReviewVersions(conversionReviewVersions) +``` + +"conversionReviewVersions is an ordered list of preferred `ConversionReview`\nversions the Webhook expects. The API server will use the first version in\nthe list which it supports. If none of the versions specified in this list\nare supported by API server, conversion will fail for the custom resource.\nIf a persisted Webhook configuration specifies allowed versions and does not\ninclude any versions known to the API Server, calls to the webhook will fail." 
+ +### fn spec.conversion.webhook.withConversionReviewVersionsMixin + +```ts +withConversionReviewVersionsMixin(conversionReviewVersions) +``` + +"conversionReviewVersions is an ordered list of preferred `ConversionReview`\nversions the Webhook expects. The API server will use the first version in\nthe list which it supports. If none of the versions specified in this list\nare supported by API server, conversion will fail for the custom resource.\nIf a persisted Webhook configuration specifies allowed versions and does not\ninclude any versions known to the API Server, calls to the webhook will fail." + +**Note:** This function appends passed data to existing values + +## obj spec.conversion.webhook.clientConfig + +"clientConfig is the instructions for how to call the webhook if strategy is `Webhook`." + +### fn spec.conversion.webhook.clientConfig.withCaBundle + +```ts +withCaBundle(caBundle) +``` + +"caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.\nIf unspecified, system trust roots on the apiserver are used." + +### fn spec.conversion.webhook.clientConfig.withUrl + +```ts +withUrl(url) +``` + +"url gives the location of the webhook, in standard URL form\n(`scheme://host:port/path`). Exactly one of `url` or `service`\nmust be specified.\n\n\nThe `host` should not refer to a service running in the cluster; use\nthe `service` field instead. The host might be resolved via external\nDNS in some apiservers (e.g., `kube-apiserver` cannot resolve\nin-cluster DNS as that would be a layering violation). `host` may\nalso be an IP address.\n\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is\nrisky unless you take great care to run this webhook on all hosts\nwhich run an apiserver which might need to make calls to this\nwebhook. Such installs are likely to be non-portable, i.e., not easy\nto turn up in a new cluster.\n\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\n\nA path is optional, and if present may be any string permissible in\na URL. You may use the path to pass an arbitrary string to the\nwebhook, for example, a cluster identifier.\n\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not\nallowed. Fragments (\"#...\") and query parameters (\"?...\") are not\nallowed, either." + +## obj spec.conversion.webhook.clientConfig.service + +"service is a reference to the service for this webhook. Either\nservice or url must be specified.\n\n\nIf the webhook is running within the cluster, then you should use `service`." + +### fn spec.conversion.webhook.clientConfig.service.withName + +```ts +withName(name) +``` + +"name is the name of the service.\nRequired" + +### fn spec.conversion.webhook.clientConfig.service.withNamespace + +```ts +withNamespace(namespace) +``` + +"namespace is the namespace of the service.\nRequired" + +### fn spec.conversion.webhook.clientConfig.service.withPath + +```ts +withPath(path) +``` + +"path is an optional URL path at which the webhook will be contacted." + +### fn spec.conversion.webhook.clientConfig.service.withPort + +```ts +withPort(port) +``` + +"port is an optional service port at which the webhook will be contacted.\n`port` should be a valid port number (1-65535, inclusive).\nDefaults to 443 for backward compatibility." + +## obj spec.defaultCompositionRef + +"DefaultCompositionRef refers to the Composition resource that will be used\nin case no composition selector is given." 
+ +### fn spec.defaultCompositionRef.withName + +```ts +withName(name) +``` + +"Name of the Composition." + +## obj spec.enforcedCompositionRef + +"EnforcedCompositionRef refers to the Composition resource that will be used\nby all composite instances whose schema is defined by this definition." + +### fn spec.enforcedCompositionRef.withName + +```ts +withName(name) +``` + +"Name of the Composition." + +## obj spec.metadata + +"Metadata specifies the desired metadata for the defined composite resource and claim CRD's." + +### fn spec.metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations" + +### fn spec.metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations" + +**Note:** This function appends passed data to existing values + +### fn spec.metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels\nand services.\nThese labels are added to the composite resource and claim CRD's in addition\nto any labels defined by `CompositionResourceDefinition` `metadata.labels`." + +### fn spec.metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels\nand services.\nThese labels are added to the composite resource and claim CRD's in addition\nto any labels defined by `CompositionResourceDefinition` `metadata.labels`." + +**Note:** This function appends passed data to existing values + +## obj spec.names + +"Names specifies the resource and kind names of the defined composite\nresource." + +### fn spec.names.withCategories + +```ts +withCategories(categories) +``` + +"categories is a list of grouped resources this custom resource belongs to (e.g. 'all').\nThis is published in API discovery documents, and used by clients to support invocations like\n`kubectl get all`." + +### fn spec.names.withCategoriesMixin + +```ts +withCategoriesMixin(categories) +``` + +"categories is a list of grouped resources this custom resource belongs to (e.g. 'all').\nThis is published in API discovery documents, and used by clients to support invocations like\n`kubectl get all`." + +**Note:** This function appends passed data to existing values + +### fn spec.names.withKind + +```ts +withKind(kind) +``` + +"kind is the serialized kind of the resource. It is normally CamelCase and singular.\nCustom resource instances will use this value as the `kind` attribute in API calls." + +### fn spec.names.withListKind + +```ts +withListKind(listKind) +``` + +"listKind is the serialized kind of the list for this resource. 
Defaults to \"`kind`List\"." + +### fn spec.names.withPlural + +```ts +withPlural(plural) +``` + +"plural is the plural name of the resource to serve.\nThe custom resources are served under `/apis///.../`.\nMust match the name of the CustomResourceDefinition (in the form `.`).\nMust be all lowercase." + +### fn spec.names.withShortNames + +```ts +withShortNames(shortNames) +``` + +"shortNames are short names for the resource, exposed in API discovery documents,\nand used by clients to support invocations like `kubectl get `.\nIt must be all lowercase." + +### fn spec.names.withShortNamesMixin + +```ts +withShortNamesMixin(shortNames) +``` + +"shortNames are short names for the resource, exposed in API discovery documents,\nand used by clients to support invocations like `kubectl get `.\nIt must be all lowercase." + +**Note:** This function appends passed data to existing values + +### fn spec.names.withSingular + +```ts +withSingular(singular) +``` + +"singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`." + +## obj spec.versions + +"Versions is the list of all API versions of the defined composite\nresource. Version names are used to compute the order in which served\nversions are listed in API discovery. If the version string is\n\"kube-like\", it will sort above non \"kube-like\" version strings, which\nare ordered lexicographically. \"Kube-like\" versions start with a \"v\",\nthen are followed by a number (the major version), then optionally the\nstring \"alpha\" or \"beta\" and another number (the minor version). These\nare sorted first by GA > beta > alpha (where GA is a version with no\nsuffix such as beta or alpha), and then by comparing major version, then\nminor version. An example sorted list of versions: v10, v2, v1, v11beta2,\nv10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10." + +### fn spec.versions.withAdditionalPrinterColumns + +```ts +withAdditionalPrinterColumns(additionalPrinterColumns) +``` + +"AdditionalPrinterColumns specifies additional columns returned in Table\noutput. If no columns are specified, a single column displaying the age\nof the custom resource is used. See the following link for details:\nhttps://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables" + +### fn spec.versions.withAdditionalPrinterColumnsMixin + +```ts +withAdditionalPrinterColumnsMixin(additionalPrinterColumns) +``` + +"AdditionalPrinterColumns specifies additional columns returned in Table\noutput. If no columns are specified, a single column displaying the age\nof the custom resource is used. See the following link for details:\nhttps://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables" + +**Note:** This function appends passed data to existing values + +### fn spec.versions.withDeprecated + +```ts +withDeprecated(deprecated) +``` + +"The deprecated field specifies that this version is deprecated and should\nnot be used." + +### fn spec.versions.withDeprecationWarning + +```ts +withDeprecationWarning(deprecationWarning) +``` + +"DeprecationWarning specifies the message that should be shown to the user\nwhen using this version." + +### fn spec.versions.withName + +```ts +withName(name) +``` + +"Name of this version, e.g. “v1”, “v2beta1”, etc. Composite resources are\nserved under this version at `/apis///...` if `served` is\ntrue." 
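+
+The `spec.versions` helpers build the entries passed to `withVersions`; a minimal, hypothetical entry combining `withName`, `withServed`, `withReferenceable` (below) and `schema.withOpenAPIV3Schema` (further down) might look like this:
+
+```jsonnet
+local crossplane = import 'github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17/main.libsonnet';
+local xrd = crossplane.apiextensions.v1.compositeResourceDefinition;
+
+xrd.spec.withVersions([
+  // One served, referenceable version with a tiny placeholder schema.
+  xrd.spec.versions.withName('v1alpha1')
+  + xrd.spec.versions.withServed(true)
+  + xrd.spec.versions.withReferenceable(true)
+  + xrd.spec.versions.schema.withOpenAPIV3Schema({
+    type: 'object',
+    properties: { spec: { type: 'object' } },
+  }),
+])
+```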
+ +### fn spec.versions.withReferenceable + +```ts +withReferenceable(referenceable) +``` + +"Referenceable specifies that this version may be referenced by a\nComposition in order to configure which resources an XR may be composed\nof. Exactly one version must be marked as referenceable; all Compositions\nmust target only the referenceable version. The referenceable version\nmust be served. It's mapped to the CRD's `spec.versions[*].storage` field." + +### fn spec.versions.withServed + +```ts +withServed(served) +``` + +"Served specifies that this version should be served via REST APIs." + +## obj spec.versions.additionalPrinterColumns + +"AdditionalPrinterColumns specifies additional columns returned in Table\noutput. If no columns are specified, a single column displaying the age\nof the custom resource is used. See the following link for details:\nhttps://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables" + +### fn spec.versions.additionalPrinterColumns.withDescription + +```ts +withDescription(description) +``` + +"description is a human readable description of this column." + +### fn spec.versions.additionalPrinterColumns.withFormat + +```ts +withFormat(format) +``` + +"format is an optional OpenAPI type definition for this column. The 'name' format is applied\nto the primary identifier column to assist in clients identifying column is the resource name.\nSee https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details." + +### fn spec.versions.additionalPrinterColumns.withJsonPath + +```ts +withJsonPath(jsonPath) +``` + +"jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against\neach custom resource to produce the value for this column." + +### fn spec.versions.additionalPrinterColumns.withName + +```ts +withName(name) +``` + +"name is a human readable name for the column." + +### fn spec.versions.additionalPrinterColumns.withPriority + +```ts +withPriority(priority) +``` + +"priority is an integer defining the relative importance of this column compared to others. Lower\nnumbers are considered higher priority. Columns that may be omitted in limited space scenarios\nshould be given a priority greater than 0." + +### fn spec.versions.additionalPrinterColumns.withType + +```ts +withType(type) +``` + +"type is an OpenAPI type definition for this column.\nSee https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details." + +## obj spec.versions.schema + +"Schema describes the schema used for validation, pruning, and defaulting\nof this version of the defined composite resource. Fields required by all\ncomposite resources will be injected into this schema automatically, and\nwill override equivalently named fields in this schema. Omitting this\nschema results in a schema that contains only the fields required by all\ncomposite resources." + +### fn spec.versions.schema.withOpenAPIV3Schema + +```ts +withOpenAPIV3Schema(openAPIV3Schema) +``` + +"OpenAPIV3Schema is the OpenAPI v3 schema to use for validation and\npruning." + +### fn spec.versions.schema.withOpenAPIV3SchemaMixin + +```ts +withOpenAPIV3SchemaMixin(openAPIV3Schema) +``` + +"OpenAPIV3Schema is the OpenAPI v3 schema to use for validation and\npruning." 
+ +**Note:** This function appends passed data to existing values \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/v1/composition.md b/docs/crossplane/1.17/apiextensions/v1/composition.md new file mode 100644 index 0000000..765e045 --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/v1/composition.md @@ -0,0 +1,2168 @@ +--- +permalink: /crossplane/1.17/apiextensions/v1/composition/ +--- + +# apiextensions.v1.composition + +"A Composition defines a collection of managed resources or functions that\nCrossplane uses to create and manage new composite resources.\n\n\nRead the Crossplane documentation for\n[more information about Compositions](https://docs.crossplane.io/latest/concepts/compositions)." + +## Index + +* [`fn new(name)`](#fn-new) +* [`fn fromXRD(name, namespace, provider, xrdRef, xrdVersion)`](#fn-fromxrd) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withMode(mode)`](#fn-specwithmode) + * [`fn withPatchSets(patchSets)`](#fn-specwithpatchsets) + * [`fn withPatchSetsMixin(patchSets)`](#fn-specwithpatchsetsmixin) + * [`fn withPipeline(pipeline)`](#fn-specwithpipeline) + * [`fn withPipelineMixin(pipeline)`](#fn-specwithpipelinemixin) + * [`fn withResources(resources)`](#fn-specwithresources) + * [`fn withResourcesMixin(resources)`](#fn-specwithresourcesmixin) + * [`fn withWriteConnectionSecretsToNamespace(writeConnectionSecretsToNamespace)`](#fn-specwithwriteconnectionsecretstonamespace) + * [`obj spec.compositeTypeRef`](#obj-speccompositetyperef) + * [`fn withApiVersion(apiVersion)`](#fn-speccompositetyperefwithapiversion) + * [`fn withKind(kind)`](#fn-speccompositetyperefwithkind) + * [`obj spec.environment`](#obj-specenvironment) + * [`fn withDefaultData(defaultData)`](#fn-specenvironmentwithdefaultdata) + * [`fn withDefaultDataMixin(defaultData)`](#fn-specenvironmentwithdefaultdatamixin) + * [`fn withEnvironmentConfigs(environmentConfigs)`](#fn-specenvironmentwithenvironmentconfigs) + * [`fn withEnvironmentConfigsMixin(environmentConfigs)`](#fn-specenvironmentwithenvironmentconfigsmixin) + * [`fn withPatches(patches)`](#fn-specenvironmentwithpatches) + * [`fn 
withPatchesMixin(patches)`](#fn-specenvironmentwithpatchesmixin) + * [`obj spec.environment.environmentConfigs`](#obj-specenvironmentenvironmentconfigs) + * [`fn withType(type)`](#fn-specenvironmentenvironmentconfigswithtype) + * [`obj spec.environment.environmentConfigs.ref`](#obj-specenvironmentenvironmentconfigsref) + * [`fn withName(name)`](#fn-specenvironmentenvironmentconfigsrefwithname) + * [`obj spec.environment.environmentConfigs.selector`](#obj-specenvironmentenvironmentconfigsselector) + * [`fn withMatchLabels(matchLabels)`](#fn-specenvironmentenvironmentconfigsselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specenvironmentenvironmentconfigsselectorwithmatchlabelsmixin) + * [`fn withMaxMatch(maxMatch)`](#fn-specenvironmentenvironmentconfigsselectorwithmaxmatch) + * [`fn withMinMatch(minMatch)`](#fn-specenvironmentenvironmentconfigsselectorwithminmatch) + * [`fn withMode(mode)`](#fn-specenvironmentenvironmentconfigsselectorwithmode) + * [`fn withSortByFieldPath(sortByFieldPath)`](#fn-specenvironmentenvironmentconfigsselectorwithsortbyfieldpath) + * [`obj spec.environment.environmentConfigs.selector.matchLabels`](#obj-specenvironmentenvironmentconfigsselectormatchlabels) + * [`fn withFromFieldPathPolicy(fromFieldPathPolicy)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithfromfieldpathpolicy) + * [`fn withKey(key)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithkey) + * [`fn withType(type)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithtype) + * [`fn withValue(value)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithvalue) + * [`fn withValueFromFieldPath(valueFromFieldPath)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithvaluefromfieldpath) + * [`obj spec.environment.patches`](#obj-specenvironmentpatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specenvironmentpatcheswithfromfieldpath) + * [`fn withToFieldPath(toFieldPath)`](#fn-specenvironmentpatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-specenvironmentpatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-specenvironmentpatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-specenvironmentpatcheswithtype) + * [`obj spec.environment.patches.combine`](#obj-specenvironmentpatchescombine) + * [`fn withStrategy(strategy)`](#fn-specenvironmentpatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-specenvironmentpatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-specenvironmentpatchescombinewithvariablesmixin) + * [`obj spec.environment.patches.combine.string`](#obj-specenvironmentpatchescombinestring) + * [`fn withFmt(fmt)`](#fn-specenvironmentpatchescombinestringwithfmt) + * [`obj spec.environment.patches.combine.variables`](#obj-specenvironmentpatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specenvironmentpatchescombinevariableswithfromfieldpath) + * [`obj spec.environment.patches.policy`](#obj-specenvironmentpatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specenvironmentpatchespolicywithfromfieldpath) + * [`obj spec.environment.patches.policy.mergeOptions`](#obj-specenvironmentpatchespolicymergeoptions) + * [`fn withAppendSlice(appendSlice)`](#fn-specenvironmentpatchespolicymergeoptionswithappendslice) + * [`fn withKeepMapValues(keepMapValues)`](#fn-specenvironmentpatchespolicymergeoptionswithkeepmapvalues) + * [`obj spec.environment.patches.transforms`](#obj-specenvironmentpatchestransforms) + * [`fn 
withMap(map)`](#fn-specenvironmentpatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-specenvironmentpatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformswithtype) + * [`obj spec.environment.patches.transforms.convert`](#obj-specenvironmentpatchestransformsconvert) + * [`fn withFormat(format)`](#fn-specenvironmentpatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-specenvironmentpatchestransformsconvertwithtotype) + * [`obj spec.environment.patches.transforms.match`](#obj-specenvironmentpatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-specenvironmentpatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-specenvironmentpatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-specenvironmentpatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-specenvironmentpatchestransformsmatchwithpatternsmixin) + * [`obj spec.environment.patches.transforms.match.patterns`](#obj-specenvironmentpatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-specenvironmentpatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-specenvironmentpatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-specenvironmentpatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformsmatchpatternswithtype) + * [`obj spec.environment.patches.transforms.math`](#obj-specenvironmentpatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-specenvironmentpatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-specenvironmentpatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-specenvironmentpatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformsmathwithtype) + * [`obj spec.environment.patches.transforms.string`](#obj-specenvironmentpatchestransformsstring) + * [`fn withConvert(convert)`](#fn-specenvironmentpatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-specenvironmentpatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-specenvironmentpatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformsstringwithtype) + * [`obj spec.environment.patches.transforms.string.join`](#obj-specenvironmentpatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-specenvironmentpatchestransformsstringjoinwithseparator) + * [`obj spec.environment.patches.transforms.string.regexp`](#obj-specenvironmentpatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-specenvironmentpatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-specenvironmentpatchestransformsstringregexpwithmatch) + * [`obj spec.environment.policy`](#obj-specenvironmentpolicy) + * [`fn withResolution(resolution)`](#fn-specenvironmentpolicywithresolution) + * [`fn withResolve(resolve)`](#fn-specenvironmentpolicywithresolve) + * [`obj spec.patchSets`](#obj-specpatchsets) + * [`fn withName(name)`](#fn-specpatchsetswithname) + * [`fn withPatches(patches)`](#fn-specpatchsetswithpatches) + * [`fn withPatchesMixin(patches)`](#fn-specpatchsetswithpatchesmixin) + * [`obj spec.patchSets.patches`](#obj-specpatchsetspatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specpatchsetspatcheswithfromfieldpath) + * [`fn withPatchSetName(patchSetName)`](#fn-specpatchsetspatcheswithpatchsetname) + * [`fn withToFieldPath(toFieldPath)`](#fn-specpatchsetspatcheswithtofieldpath) + 
* [`fn withTransforms(transforms)`](#fn-specpatchsetspatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-specpatchsetspatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-specpatchsetspatcheswithtype) + * [`obj spec.patchSets.patches.combine`](#obj-specpatchsetspatchescombine) + * [`fn withStrategy(strategy)`](#fn-specpatchsetspatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-specpatchsetspatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-specpatchsetspatchescombinewithvariablesmixin) + * [`obj spec.patchSets.patches.combine.string`](#obj-specpatchsetspatchescombinestring) + * [`fn withFmt(fmt)`](#fn-specpatchsetspatchescombinestringwithfmt) + * [`obj spec.patchSets.patches.combine.variables`](#obj-specpatchsetspatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specpatchsetspatchescombinevariableswithfromfieldpath) + * [`obj spec.patchSets.patches.policy`](#obj-specpatchsetspatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specpatchsetspatchespolicywithfromfieldpath) + * [`obj spec.patchSets.patches.policy.mergeOptions`](#obj-specpatchsetspatchespolicymergeoptions) + * [`fn withAppendSlice(appendSlice)`](#fn-specpatchsetspatchespolicymergeoptionswithappendslice) + * [`fn withKeepMapValues(keepMapValues)`](#fn-specpatchsetspatchespolicymergeoptionswithkeepmapvalues) + * [`obj spec.patchSets.patches.transforms`](#obj-specpatchsetspatchestransforms) + * [`fn withMap(map)`](#fn-specpatchsetspatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-specpatchsetspatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformswithtype) + * [`obj spec.patchSets.patches.transforms.convert`](#obj-specpatchsetspatchestransformsconvert) + * [`fn withFormat(format)`](#fn-specpatchsetspatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-specpatchsetspatchestransformsconvertwithtotype) + * [`obj spec.patchSets.patches.transforms.match`](#obj-specpatchsetspatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-specpatchsetspatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-specpatchsetspatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-specpatchsetspatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-specpatchsetspatchestransformsmatchwithpatternsmixin) + * [`obj spec.patchSets.patches.transforms.match.patterns`](#obj-specpatchsetspatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-specpatchsetspatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-specpatchsetspatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-specpatchsetspatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformsmatchpatternswithtype) + * [`obj spec.patchSets.patches.transforms.math`](#obj-specpatchsetspatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-specpatchsetspatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-specpatchsetspatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-specpatchsetspatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformsmathwithtype) + * [`obj spec.patchSets.patches.transforms.string`](#obj-specpatchsetspatchestransformsstring) + * [`fn withConvert(convert)`](#fn-specpatchsetspatchestransformsstringwithconvert) + * [`fn 
withFmt(fmt)`](#fn-specpatchsetspatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-specpatchsetspatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformsstringwithtype) + * [`obj spec.patchSets.patches.transforms.string.join`](#obj-specpatchsetspatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-specpatchsetspatchestransformsstringjoinwithseparator) + * [`obj spec.patchSets.patches.transforms.string.regexp`](#obj-specpatchsetspatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-specpatchsetspatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-specpatchsetspatchestransformsstringregexpwithmatch) + * [`obj spec.pipeline`](#obj-specpipeline) + * [`fn withCredentials(credentials)`](#fn-specpipelinewithcredentials) + * [`fn withCredentialsMixin(credentials)`](#fn-specpipelinewithcredentialsmixin) + * [`fn withInput(input)`](#fn-specpipelinewithinput) + * [`fn withInputMixin(input)`](#fn-specpipelinewithinputmixin) + * [`fn withStep(step)`](#fn-specpipelinewithstep) + * [`obj spec.pipeline.credentials`](#obj-specpipelinecredentials) + * [`fn withName(name)`](#fn-specpipelinecredentialswithname) + * [`fn withSource(source)`](#fn-specpipelinecredentialswithsource) + * [`obj spec.pipeline.credentials.secretRef`](#obj-specpipelinecredentialssecretref) + * [`fn withName(name)`](#fn-specpipelinecredentialssecretrefwithname) + * [`fn withNamespace(namespace)`](#fn-specpipelinecredentialssecretrefwithnamespace) + * [`obj spec.pipeline.functionRef`](#obj-specpipelinefunctionref) + * [`fn withName(name)`](#fn-specpipelinefunctionrefwithname) + * [`obj spec.publishConnectionDetailsWithStoreConfigRef`](#obj-specpublishconnectiondetailswithstoreconfigref) + * [`fn withName(name)`](#fn-specpublishconnectiondetailswithstoreconfigrefwithname) + * [`obj spec.resources`](#obj-specresources) + * [`fn withBase(base)`](#fn-specresourceswithbase) + * [`fn withBaseMixin(base)`](#fn-specresourceswithbasemixin) + * [`fn withConnectionDetails(connectionDetails)`](#fn-specresourceswithconnectiondetails) + * [`fn withConnectionDetailsMixin(connectionDetails)`](#fn-specresourceswithconnectiondetailsmixin) + * [`fn withName(name)`](#fn-specresourceswithname) + * [`fn withPatches(patches)`](#fn-specresourceswithpatches) + * [`fn withPatchesMixin(patches)`](#fn-specresourceswithpatchesmixin) + * [`fn withReadinessChecks(readinessChecks)`](#fn-specresourceswithreadinesschecks) + * [`fn withReadinessChecksMixin(readinessChecks)`](#fn-specresourceswithreadinesschecksmixin) + * [`obj spec.resources.connectionDetails`](#obj-specresourcesconnectiondetails) + * [`fn withFromConnectionSecretKey(fromConnectionSecretKey)`](#fn-specresourcesconnectiondetailswithfromconnectionsecretkey) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcesconnectiondetailswithfromfieldpath) + * [`fn withName(name)`](#fn-specresourcesconnectiondetailswithname) + * [`fn withType(type)`](#fn-specresourcesconnectiondetailswithtype) + * [`fn withValue(value)`](#fn-specresourcesconnectiondetailswithvalue) + * [`obj spec.resources.patches`](#obj-specresourcespatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcespatcheswithfromfieldpath) + * [`fn withPatchSetName(patchSetName)`](#fn-specresourcespatcheswithpatchsetname) + * [`fn withToFieldPath(toFieldPath)`](#fn-specresourcespatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-specresourcespatcheswithtransforms) + * [`fn 
withTransformsMixin(transforms)`](#fn-specresourcespatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-specresourcespatcheswithtype) + * [`obj spec.resources.patches.combine`](#obj-specresourcespatchescombine) + * [`fn withStrategy(strategy)`](#fn-specresourcespatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-specresourcespatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-specresourcespatchescombinewithvariablesmixin) + * [`obj spec.resources.patches.combine.string`](#obj-specresourcespatchescombinestring) + * [`fn withFmt(fmt)`](#fn-specresourcespatchescombinestringwithfmt) + * [`obj spec.resources.patches.combine.variables`](#obj-specresourcespatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcespatchescombinevariableswithfromfieldpath) + * [`obj spec.resources.patches.policy`](#obj-specresourcespatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcespatchespolicywithfromfieldpath) + * [`obj spec.resources.patches.policy.mergeOptions`](#obj-specresourcespatchespolicymergeoptions) + * [`fn withAppendSlice(appendSlice)`](#fn-specresourcespatchespolicymergeoptionswithappendslice) + * [`fn withKeepMapValues(keepMapValues)`](#fn-specresourcespatchespolicymergeoptionswithkeepmapvalues) + * [`obj spec.resources.patches.transforms`](#obj-specresourcespatchestransforms) + * [`fn withMap(map)`](#fn-specresourcespatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-specresourcespatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-specresourcespatchestransformswithtype) + * [`obj spec.resources.patches.transforms.convert`](#obj-specresourcespatchestransformsconvert) + * [`fn withFormat(format)`](#fn-specresourcespatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-specresourcespatchestransformsconvertwithtotype) + * [`obj spec.resources.patches.transforms.match`](#obj-specresourcespatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-specresourcespatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-specresourcespatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-specresourcespatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-specresourcespatchestransformsmatchwithpatternsmixin) + * [`obj spec.resources.patches.transforms.match.patterns`](#obj-specresourcespatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-specresourcespatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-specresourcespatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-specresourcespatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-specresourcespatchestransformsmatchpatternswithtype) + * [`obj spec.resources.patches.transforms.math`](#obj-specresourcespatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-specresourcespatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-specresourcespatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-specresourcespatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-specresourcespatchestransformsmathwithtype) + * [`obj spec.resources.patches.transforms.string`](#obj-specresourcespatchestransformsstring) + * [`fn withConvert(convert)`](#fn-specresourcespatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-specresourcespatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-specresourcespatchestransformsstringwithtrim) + 
* [`fn withType(type)`](#fn-specresourcespatchestransformsstringwithtype) + * [`obj spec.resources.patches.transforms.string.join`](#obj-specresourcespatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-specresourcespatchestransformsstringjoinwithseparator) + * [`obj spec.resources.patches.transforms.string.regexp`](#obj-specresourcespatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-specresourcespatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-specresourcespatchestransformsstringregexpwithmatch) + * [`obj spec.resources.readinessChecks`](#obj-specresourcesreadinesschecks) + * [`fn withFieldPath(fieldPath)`](#fn-specresourcesreadinesscheckswithfieldpath) + * [`fn withMatchInteger(matchInteger)`](#fn-specresourcesreadinesscheckswithmatchinteger) + * [`fn withMatchString(matchString)`](#fn-specresourcesreadinesscheckswithmatchstring) + * [`fn withType(type)`](#fn-specresourcesreadinesscheckswithtype) + * [`obj spec.resources.readinessChecks.matchCondition`](#obj-specresourcesreadinesschecksmatchcondition) + * [`fn withType(type)`](#fn-specresourcesreadinesschecksmatchconditionwithtype) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Composition + +### fn fromXRD + +```ts +fromXRD(name, namespace, provider, xrdRef, xrdVersion) +``` + +Create a Composition based on an XRD. + +Attributes: +- `name` of the composition +- `namespace` where connectionDetails are propagated too, commonly the the + management namespace (ie. crossplane) +- `provider` of the resources in this composition +- `xrdRef` XRD object with which this composition is compatible +- `xrdVersion` Version of XRD object with which this composition is compatible + + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. 
May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." 
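+
+For illustration, the `new` constructor and the metadata helpers documented above can be chained with `+`. This is a minimal sketch: the import path and the `crossplane.apiextensions.v1.composition` package location are assumptions, and the label and annotation values are placeholders.
+
+```jsonnet
+// Import path is an assumption; adjust to where the library is vendored.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+local composition = crossplane.apiextensions.v1.composition;
+
+composition.new('example-composition')
+// Label and annotation values below are illustrative only.
++ composition.metadata.withLabels({ 'crossplane.io/xrd': 'xexamples.example.org' })
++ composition.metadata.withAnnotationsMixin({ 'example.org/owner': 'platform-team' })
+```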
+ +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"CompositionSpec specifies desired state of a composition." + +### fn spec.withMode + +```ts +withMode(mode) +``` + +"Mode controls what type or \"mode\" of Composition will be used.\n\n\n\"Pipeline\" indicates that a Composition specifies a pipeline of\nComposition Functions, each of which is responsible for producing\ncomposed resources that Crossplane should create or update.\n\n\n\"Resources\" indicates that a Composition uses what is commonly referred\nto as \"Patch & Transform\" or P&T composition. This mode of Composition\nuses an array of resources, each a template for a composed resource.\n\n\nAll Compositions should use Pipeline mode. Resources mode is deprecated.\nResources mode won't be removed in Crossplane 1.x, and will remain the\ndefault to avoid breaking legacy Compositions. However, it's no longer\naccepting new features, and only accepting security related bug fixes." + +### fn spec.withPatchSets + +```ts +withPatchSets(patchSets) +``` + +"PatchSets define a named set of patches that may be included by any\nresource in this Composition. PatchSets cannot themselves refer to other\nPatchSets.\n\n\nPatchSets are only used by the \"Resources\" mode of Composition. They\nare ignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.withPatchSetsMixin + +```ts +withPatchSetsMixin(patchSets) +``` + +"PatchSets define a named set of patches that may be included by any\nresource in this Composition. PatchSets cannot themselves refer to other\nPatchSets.\n\n\nPatchSets are only used by the \"Resources\" mode of Composition. They\nare ignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +**Note:** This function appends passed data to existing values + +### fn spec.withPipeline + +```ts +withPipeline(pipeline) +``` + +"Pipeline is a list of composition function steps that will be used when a\ncomposite resource referring to this composition is created. One of\nresources and pipeline must be specified - you cannot specify both.\n\n\nThe Pipeline is only used by the \"Pipeline\" mode of Composition. It is\nignored by other modes." + +### fn spec.withPipelineMixin + +```ts +withPipelineMixin(pipeline) +``` + +"Pipeline is a list of composition function steps that will be used when a\ncomposite resource referring to this composition is created. One of\nresources and pipeline must be specified - you cannot specify both.\n\n\nThe Pipeline is only used by the \"Pipeline\" mode of Composition. It is\nignored by other modes." + +**Note:** This function appends passed data to existing values + +### fn spec.withResources + +```ts +withResources(resources) +``` + +"Resources is a list of resource templates that will be used when a\ncomposite resource referring to this composition is created.\n\n\nResources are only used by the \"Resources\" mode of Composition. They are\nignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." 
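+
+To make the mode selection above concrete, here is a hedged sketch of a Pipeline-mode Composition. The `spec.compositeTypeRef` and `spec.pipeline` helpers it uses are documented further down this page; the composite type, step name, and `function-patch-and-transform` Function name are placeholders, and the import path is an assumption.
+
+```jsonnet
+// Import path is an assumption; adjust to where the library is vendored.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+local composition = crossplane.apiextensions.v1.composition;
+
+composition.new('xexample-pipeline')
++ composition.spec.compositeTypeRef.withApiVersion('example.org/v1alpha1')
++ composition.spec.compositeTypeRef.withKind('XExample')
++ composition.spec.withMode('Pipeline')
++ composition.spec.withPipeline([
+  // Each pipeline entry is one step; the referenced Function name is a placeholder.
+  composition.spec.pipeline.withStep('compose-resources')
+  + composition.spec.pipeline.functionRef.withName('function-patch-and-transform'),
+])
+```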
+ +### fn spec.withResourcesMixin + +```ts +withResourcesMixin(resources) +``` + +"Resources is a list of resource templates that will be used when a\ncomposite resource referring to this composition is created.\n\n\nResources are only used by the \"Resources\" mode of Composition. They are\nignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +**Note:** This function appends passed data to existing values + +### fn spec.withWriteConnectionSecretsToNamespace + +```ts +withWriteConnectionSecretsToNamespace(writeConnectionSecretsToNamespace) +``` + +"WriteConnectionSecretsToNamespace specifies the namespace in which the\nconnection secrets of composite resource dynamically provisioned using\nthis composition will be created.\nThis field is planned to be replaced in a future release in favor of\nPublishConnectionDetailsWithStoreConfigRef. Currently, both could be\nset independently and connection details would be published to both\nwithout affecting each other as long as related fields at MR level\nspecified." + +## obj spec.compositeTypeRef + +"CompositeTypeRef specifies the type of composite resource that this\ncomposition is compatible with." + +### fn spec.compositeTypeRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"APIVersion of the type." + +### fn spec.compositeTypeRef.withKind + +```ts +withKind(kind) +``` + +"Kind of the type." + +## obj spec.environment + +"Environment configures the environment in which resources are rendered.\n\n\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\nunless the relevant Crossplane feature flag is enabled, and may be\nchanged or removed without notice." + +### fn spec.environment.withDefaultData + +```ts +withDefaultData(defaultData) +``` + +"DefaultData statically defines the initial state of the environment.\nIt has the same schema-less structure as the data field in\nenvironment configs.\nIt is overwritten by the selected environment configs." + +### fn spec.environment.withDefaultDataMixin + +```ts +withDefaultDataMixin(defaultData) +``` + +"DefaultData statically defines the initial state of the environment.\nIt has the same schema-less structure as the data field in\nenvironment configs.\nIt is overwritten by the selected environment configs." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.withEnvironmentConfigs + +```ts +withEnvironmentConfigs(environmentConfigs) +``` + +"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\nresources are stored in the composite resource at\n`spec.environmentConfigRefs` and is only updated if it is null.\n\n\nThe list of references is used to compute an in-memory environment at\ncompose time. The data of all object is merged in the order they are\nlisted, meaning the values of EnvironmentConfigs with a larger index take\npriority over ones with smaller indices.\n\n\nThe computed environment can be accessed in a composition using\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches." + +### fn spec.environment.withEnvironmentConfigsMixin + +```ts +withEnvironmentConfigsMixin(environmentConfigs) +``` + +"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\nresources are stored in the composite resource at\n`spec.environmentConfigRefs` and is only updated if it is null.\n\n\nThe list of references is used to compute an in-memory environment at\ncompose time. 
The data of all object is merged in the order they are\nlisted, meaning the values of EnvironmentConfigs with a larger index take\npriority over ones with smaller indices.\n\n\nThe computed environment can be accessed in a composition using\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.withPatches + +```ts +withPatches(patches) +``` + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed." + +### fn spec.environment.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed." + +**Note:** This function appends passed data to existing values + +## obj spec.environment.environmentConfigs + +"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\nresources are stored in the composite resource at\n`spec.environmentConfigRefs` and is only updated if it is null.\n\n\nThe list of references is used to compute an in-memory environment at\ncompose time. The data of all object is merged in the order they are\nlisted, meaning the values of EnvironmentConfigs with a larger index take\npriority over ones with smaller indices.\n\n\nThe computed environment can be accessed in a composition using\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches." + +### fn spec.environment.environmentConfigs.withType + +```ts +withType(type) +``` + +"Type specifies the way the EnvironmentConfig is selected.\nDefault is `Reference`" + +## obj spec.environment.environmentConfigs.ref + +"Ref is a named reference to a single EnvironmentConfig.\nEither Ref or Selector is required." + +### fn spec.environment.environmentConfigs.ref.withName + +```ts +withName(name) +``` + +"The name of the object." + +## obj spec.environment.environmentConfigs.selector + +"Selector selects EnvironmentConfig(s) via labels." + +### fn spec.environment.environmentConfigs.selector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +### fn spec.environment.environmentConfigs.selector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.environmentConfigs.selector.withMaxMatch + +```ts +withMaxMatch(maxMatch) +``` + +"MaxMatch specifies the number of extracted EnvironmentConfigs in Multiple mode, extracts all if nil." + +### fn spec.environment.environmentConfigs.selector.withMinMatch + +```ts +withMinMatch(minMatch) +``` + +"MinMatch specifies the required minimum of extracted EnvironmentConfigs in Multiple mode." + +### fn spec.environment.environmentConfigs.selector.withMode + +```ts +withMode(mode) +``` + +"Mode specifies retrieval strategy: \"Single\" or \"Multiple\"." + +### fn spec.environment.environmentConfigs.selector.withSortByFieldPath + +```ts +withSortByFieldPath(sortByFieldPath) +``` + +"SortByFieldPath is the path to the field based on which list of EnvironmentConfigs is alphabetically sorted." + +## obj spec.environment.environmentConfigs.selector.matchLabels + +"MatchLabels ensures an object with matching labels is selected." 
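+
+As a sketch of the selector described above (part of the alpha `spec.environment` feature), the following selects `EnvironmentConfig`s by label and can be mixed into a Composition with `+`. The `matchLabels` helpers it uses are documented just below; the mode, type, key, and value are illustrative, and the import path is an assumption.
+
+```jsonnet
+// Import path is an assumption; adjust to where the library is vendored.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+local composition = crossplane.apiextensions.v1.composition;
+local envCfg = composition.spec.environment.environmentConfigs;
+
+composition.spec.environment.withEnvironmentConfigs([
+  envCfg.withType('Selector')
+  + envCfg.selector.withMode('Multiple')
+  + envCfg.selector.withMatchLabels([
+    // Match EnvironmentConfigs labelled stage=dev (illustrative label pair).
+    envCfg.selector.matchLabels.withKey('stage')
+    + envCfg.selector.matchLabels.withType('Value')
+    + envCfg.selector.matchLabels.withValue('dev'),
+  ]),
+])
+```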
+ +### fn spec.environment.environmentConfigs.selector.matchLabels.withFromFieldPathPolicy + +```ts +withFromFieldPathPolicy(fromFieldPathPolicy) +``` + +"FromFieldPathPolicy specifies the policy for the valueFromFieldPath.\nThe default is Required, meaning that an error will be returned if the\nfield is not found in the composite resource.\nOptional means that if the field is not found in the composite resource,\nthat label pair will just be skipped. N.B. other specified label\nmatchers will still be used to retrieve the desired\nenvironment config, if any." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withKey + +```ts +withKey(key) +``` + +"Key of the label to match." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withType + +```ts +withType(type) +``` + +"Type specifies where the value for a label comes from." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withValue + +```ts +withValue(value) +``` + +"Value specifies a literal label value." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withValueFromFieldPath + +```ts +withValueFromFieldPath(valueFromFieldPath) +``` + +"ValueFromFieldPath specifies the field path to look for the label value." + +## obj spec.environment.patches + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed." + +### fn spec.environment.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath or\nToCompositeFieldPath." + +### fn spec.environment.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn spec.environment.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.environment.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the Patch object." + +## obj spec.environment.patches.combine + +"Combine is the patch configuration for a CombineFromComposite or\nCombineToComposite patch." + +### fn spec.environment.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn spec.environment.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.environment.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.environment.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn spec.environment.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj spec.environment.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.environment.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj spec.environment.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn spec.environment.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. Use 'Required' if the patch should fail if\nthe specified path does not exist." + +## obj spec.environment.patches.policy.mergeOptions + +"MergeOptions Specifies merge options on a field path." + +### fn spec.environment.patches.policy.mergeOptions.withAppendSlice + +```ts +withAppendSlice(appendSlice) +``` + +"Specifies that already existing elements in a merged slice should be preserved" + +### fn spec.environment.patches.policy.mergeOptions.withKeepMapValues + +```ts +withKeepMapValues(keepMapValues) +``` + +"Specifies that already existing values in a merged map should be preserved" + +## obj spec.environment.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.environment.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn spec.environment.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj spec.environment.patches.transforms.convert + +"Convert is used to cast the input into the given output type." + +### fn spec.environment.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn spec.environment.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj spec.environment.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn spec.environment.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." 
+ +### fn spec.environment.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn spec.environment.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.environment.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj spec.environment.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.environment.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn spec.environment.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn spec.environment.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn spec.environment.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj spec.environment.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn spec.environment.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn spec.environment.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." + +### fn spec.environment.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn spec.environment.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj spec.environment.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." 
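+
+Bringing the environment patch and transform helpers above together, here is a hedged sketch of a single patch with a `map` transform, to be mixed into a Composition with `+`. The field paths, map keys, and mapped values are illustrative only, and the import path is an assumption.
+
+```jsonnet
+// Import path is an assumption; adjust to where the library is vendored.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+local composition = crossplane.apiextensions.v1.composition;
+local patch = composition.spec.environment.patches;
+
+composition.spec.environment.withPatches([
+  // Copy a composite field into the in-memory environment, mapping its value.
+  patch.withType('FromCompositeFieldPath')
+  + patch.withFromFieldPath('spec.parameters.size')
+  + patch.withToFieldPath('tier')
+  + patch.withTransforms([
+    patch.transforms.withType('map')
+    + patch.transforms.withMap({ small: 'standard', large: 'performance' }),
+  ]),
+])
+```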
+ +### fn spec.environment.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON.\n`ToAdler32` generate a addler32 hash based on the input string." + +### fn spec.environment.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn spec.environment.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn spec.environment.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj spec.environment.patches.transforms.string.join + +"Join defines parameters to join a slice of values to a string." + +### fn spec.environment.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator defines the character that should separate the values from each\nother in the joined string." + +## obj spec.environment.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn spec.environment.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn spec.environment.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj spec.environment.policy + +"Policy represents the Resolve and Resolution policies which apply to\nall EnvironmentSourceReferences in EnvironmentConfigs list." + +### fn spec.environment.policy.withResolution + +```ts +withResolution(resolution) +``` + +"Resolution specifies whether resolution of this reference is required.\nThe default is 'Required', which means the reconcile will fail if the\nreference cannot be resolved. 'Optional' means this reference will be\na no-op if it cannot be resolved." + +### fn spec.environment.policy.withResolve + +```ts +withResolve(resolve) +``` + +"Resolve specifies when this reference should be resolved. The default\nis 'IfNotPresent', which will attempt to resolve the reference only when\nthe corresponding field is not present. Use 'Always' to resolve the\nreference on every reconcile." + +## obj spec.patchSets + +"PatchSets define a named set of patches that may be included by any\nresource in this Composition. PatchSets cannot themselves refer to other\nPatchSets.\n\n\nPatchSets are only used by the \"Resources\" mode of Composition. They\nare ignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.patchSets.withName + +```ts +withName(name) +``` + +"Name of this PatchSet." + +### fn spec.patchSets.withPatches + +```ts +withPatches(patches) +``` + +"Patches will be applied as an overlay to the base resource." + +### fn spec.patchSets.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches will be applied as an overlay to the base resource." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.patchSets.patches + +"Patches will be applied as an overlay to the base resource." + +### fn spec.patchSets.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath,\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath." + +### fn spec.patchSets.patches.withPatchSetName + +```ts +withPatchSetName(patchSetName) +``` + +"PatchSetName to include patches from. Required when type is PatchSet." + +### fn spec.patchSets.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn spec.patchSets.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.patchSets.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn spec.patchSets.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the Patch object." + +## obj spec.patchSets.patches.combine + +"Combine is the patch configuration for a CombineFromComposite,\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch." + +### fn spec.patchSets.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn spec.patchSets.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.patchSets.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj spec.patchSets.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn spec.patchSets.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj spec.patchSets.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.patchSets.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj spec.patchSets.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn spec.patchSets.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\nthe specified path does not exist." + +## obj spec.patchSets.patches.policy.mergeOptions + +"MergeOptions Specifies merge options on a field path." + +### fn spec.patchSets.patches.policy.mergeOptions.withAppendSlice + +```ts +withAppendSlice(appendSlice) +``` + +"Specifies that already existing elements in a merged slice should be preserved" + +### fn spec.patchSets.patches.policy.mergeOptions.withKeepMapValues + +```ts +withKeepMapValues(keepMapValues) +``` + +"Specifies that already existing values in a merged map should be preserved" + +## obj spec.patchSets.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.patchSets.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn spec.patchSets.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn spec.patchSets.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj spec.patchSets.patches.transforms.convert + +"Convert is used to cast the input into the given output type." + +### fn spec.patchSets.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn spec.patchSets.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj spec.patchSets.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn spec.patchSets.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn spec.patchSets.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn spec.patchSets.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.patchSets.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj spec.patchSets.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." 
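+
+The PatchSet helpers above can be combined with the resource helpers documented later on this page. A hedged sketch: define a named PatchSet and include it from a composed-resource entry (names and field paths are illustrative; the import path is an assumption).
+
+```jsonnet
+// Import path is an assumption; adjust to where the library is vendored.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+local composition = crossplane.apiextensions.v1.composition;
+
+composition.spec.withPatchSets([
+  composition.spec.patchSets.withName('common-labels')
+  + composition.spec.patchSets.withPatches([
+    composition.spec.patchSets.patches.withType('FromCompositeFieldPath')
+    + composition.spec.patchSets.patches.withFromFieldPath('metadata.labels')
+    + composition.spec.patchSets.patches.withToFieldPath('metadata.labels'),
+  ]),
+])
++ composition.spec.withResourcesMixin([
+  // Include the PatchSet by name from a composed-resource entry.
+  composition.spec.resources.withName('bucket')
+  + composition.spec.resources.withPatches([
+    composition.spec.resources.patches.withType('PatchSet')
+    + composition.spec.resources.patches.withPatchSetName('common-labels'),
+  ]),
+])
+```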
+ +### fn spec.patchSets.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn spec.patchSets.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn spec.patchSets.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn spec.patchSets.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj spec.patchSets.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn spec.patchSets.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn spec.patchSets.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." + +### fn spec.patchSets.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn spec.patchSets.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj spec.patchSets.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn spec.patchSets.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON.\n`ToAdler32` generate a addler32 hash based on the input string." + +### fn spec.patchSets.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn spec.patchSets.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn spec.patchSets.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj spec.patchSets.patches.transforms.string.join + +"Join defines parameters to join a slice of values to a string." + +### fn spec.patchSets.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator defines the character that should separate the values from each\nother in the joined string." + +## obj spec.patchSets.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn spec.patchSets.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 
0 (the default) matches the entire expression." + +### fn spec.patchSets.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj spec.pipeline + +"Pipeline is a list of composition function steps that will be used when a\ncomposite resource referring to this composition is created. One of\nresources and pipeline must be specified - you cannot specify both.\n\n\nThe Pipeline is only used by the \"Pipeline\" mode of Composition. It is\nignored by other modes." + +### fn spec.pipeline.withCredentials + +```ts +withCredentials(credentials) +``` + +"Credentials are optional credentials that the Composition Function needs." + +### fn spec.pipeline.withCredentialsMixin + +```ts +withCredentialsMixin(credentials) +``` + +"Credentials are optional credentials that the Composition Function needs." + +**Note:** This function appends passed data to existing values + +### fn spec.pipeline.withInput + +```ts +withInput(input) +``` + +"Input is an optional, arbitrary Kubernetes resource (i.e. a resource\nwith an apiVersion and kind) that will be passed to the Composition\nFunction as the 'input' of its RunFunctionRequest." + +### fn spec.pipeline.withInputMixin + +```ts +withInputMixin(input) +``` + +"Input is an optional, arbitrary Kubernetes resource (i.e. a resource\nwith an apiVersion and kind) that will be passed to the Composition\nFunction as the 'input' of its RunFunctionRequest." + +**Note:** This function appends passed data to existing values + +### fn spec.pipeline.withStep + +```ts +withStep(step) +``` + +"Step name. Must be unique within its Pipeline." + +## obj spec.pipeline.credentials + +"Credentials are optional credentials that the Composition Function needs." + +### fn spec.pipeline.credentials.withName + +```ts +withName(name) +``` + +"Name of this set of credentials." + +### fn spec.pipeline.credentials.withSource + +```ts +withSource(source) +``` + +"Source of the function credentials." + +## obj spec.pipeline.credentials.secretRef + +"A SecretRef is a reference to a secret containing credentials that should\nbe supplied to the function." + +### fn spec.pipeline.credentials.secretRef.withName + +```ts +withName(name) +``` + +"Name of the secret." + +### fn spec.pipeline.credentials.secretRef.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace of the secret." + +## obj spec.pipeline.functionRef + +"FunctionRef is a reference to the Composition Function this step should\nexecute." + +### fn spec.pipeline.functionRef.withName + +```ts +withName(name) +``` + +"Name of the referenced Function." + +## obj spec.publishConnectionDetailsWithStoreConfigRef + +"PublishConnectionDetailsWithStoreConfig specifies the secret store config\nwith which the connection details of composite resources dynamically\nprovisioned using this composition will be published.\n\n\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\nunless the relevant Crossplane feature flag is enabled, and may be\nchanged or removed without notice." + +### fn spec.publishConnectionDetailsWithStoreConfigRef.withName + +```ts +withName(name) +``` + +"Name of the referenced StoreConfig." + +## obj spec.resources + +"Resources is a list of resource templates that will be used when a\ncomposite resource referring to this composition is created.\n\n\nResources are only used by the \"Resources\" mode of Composition. 
They are\nignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.resources.withBase + +```ts +withBase(base) +``` + +"Base is the target resource that the patches will be applied on." + +### fn spec.resources.withBaseMixin + +```ts +withBaseMixin(base) +``` + +"Base is the target resource that the patches will be applied on." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withConnectionDetails + +```ts +withConnectionDetails(connectionDetails) +``` + +"ConnectionDetails lists the propagation secret keys from this target\nresource to the composition instance connection secret." + +### fn spec.resources.withConnectionDetailsMixin + +```ts +withConnectionDetailsMixin(connectionDetails) +``` + +"ConnectionDetails lists the propagation secret keys from this target\nresource to the composition instance connection secret." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withName + +```ts +withName(name) +``` + +"A Name uniquely identifies this entry within its Composition's resources\narray. Names are optional but *strongly* recommended. When all entries in\nthe resources array are named entries may added, deleted, and reordered\nas long as their names do not change. When entries are not named the\nlength and order of the resources array should be treated as immutable.\nEither all or no entries must be named." + +### fn spec.resources.withPatches + +```ts +withPatches(patches) +``` + +"Patches will be applied as overlay to the base resource." + +### fn spec.resources.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches will be applied as overlay to the base resource." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withReadinessChecks + +```ts +withReadinessChecks(readinessChecks) +``` + +"ReadinessChecks allows users to define custom readiness checks. All checks\nhave to return true in order for resource to be considered ready. The\ndefault readiness check is to have the \"Ready\" condition to be \"True\"." + +### fn spec.resources.withReadinessChecksMixin + +```ts +withReadinessChecksMixin(readinessChecks) +``` + +"ReadinessChecks allows users to define custom readiness checks. All checks\nhave to return true in order for resource to be considered ready. The\ndefault readiness check is to have the \"Ready\" condition to be \"True\"." + +**Note:** This function appends passed data to existing values + +## obj spec.resources.connectionDetails + +"ConnectionDetails lists the propagation secret keys from this target\nresource to the composition instance connection secret." + +### fn spec.resources.connectionDetails.withFromConnectionSecretKey + +```ts +withFromConnectionSecretKey(fromConnectionSecretKey) +``` + +"FromConnectionSecretKey is the key that will be used to fetch the value\nfrom the composed resource's connection secret." + +### fn spec.resources.connectionDetails.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the composed resource whose\nvalue to be used as input. Name must be specified if the type is\nFromFieldPath." + +### fn spec.resources.connectionDetails.withName + +```ts +withName(name) +``` + +"Name of the connection secret key that will be propagated to the\nconnection secret of the composition instance. Leave empty if you'd like\nto use the same key name." 
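+
+A minimal Jsonnet sketch of a connection-details entry on a composed resource; the import path and the `composition` handle are assumptions, while the `with*` builders and key names used are taken from this reference as illustrative values:
+
+```jsonnet
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';  // assumed path
+local resources = crossplane.apiextensions.v1.composition.spec.resources;  // assumed handle
+local cd = resources.connectionDetails;
+
+// Propagate the composed resource's 'password' secret key to the composite's
+// connection secret under the key 'db-password'.
+resources.withName('rds-instance')
++ resources.withConnectionDetails([
+  cd.withFromConnectionSecretKey('password') + cd.withName('db-password'),
+])
+```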
+ +### fn spec.resources.connectionDetails.withType + +```ts +withType(type) +``` + +"Type sets the connection detail fetching behaviour to be used. Each\nconnection detail type may require its own fields to be set on the\nConnectionDetail object. If the type is omitted Crossplane will attempt\nto infer it based on which other fields were specified. If multiple\nfields are specified the order of precedence is:\n1. FromValue\n2. FromConnectionSecretKey\n3. FromFieldPath" + +### fn spec.resources.connectionDetails.withValue + +```ts +withValue(value) +``` + +"Value that will be propagated to the connection secret of the composite\nresource. May be set to inject a fixed, non-sensitive connection secret\nvalue, for example a well-known port." + +## obj spec.resources.patches + +"Patches will be applied as overlay to the base resource." + +### fn spec.resources.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath,\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath." + +### fn spec.resources.patches.withPatchSetName + +```ts +withPatchSetName(patchSetName) +``` + +"PatchSetName to include patches from. Required when type is PatchSet." + +### fn spec.resources.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn spec.resources.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.resources.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the Patch object." + +## obj spec.resources.patches.combine + +"Combine is the patch configuration for a CombineFromComposite,\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch." + +### fn spec.resources.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn spec.resources.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.resources.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj spec.resources.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn spec.resources.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." 
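+
+A short Jsonnet sketch of a CombineFromComposite patch built from the combine builders above (the import path and `composition` handle are assumptions; field paths and format string are illustrative):
+
+```jsonnet
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';  // assumed path
+local patches = crossplane.apiextensions.v1.composition.spec.resources.patches;  // assumed handle
+
+// Join two composite fields into a single string and write it to one target field.
+patches.withType('CombineFromComposite')
++ patches.withToFieldPath('metadata.annotations[example.org/full-name]')
++ patches.combine.withStrategy('string')
++ patches.combine.string.withFmt('%s-%s')
++ patches.combine.withVariables([
+  patches.combine.variables.withFromFieldPath('spec.parameters.region'),
+  patches.combine.variables.withFromFieldPath('spec.parameters.name'),
+])
+```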
+ +## obj spec.resources.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.resources.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj spec.resources.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn spec.resources.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. Use 'Required' if the patch should fail if\nthe specified path does not exist." + +## obj spec.resources.patches.policy.mergeOptions + +"MergeOptions Specifies merge options on a field path." + +### fn spec.resources.patches.policy.mergeOptions.withAppendSlice + +```ts +withAppendSlice(appendSlice) +``` + +"Specifies that already existing elements in a merged slice should be preserved" + +### fn spec.resources.patches.policy.mergeOptions.withKeepMapValues + +```ts +withKeepMapValues(keepMapValues) +``` + +"Specifies that already existing values in a merged map should be preserved" + +## obj spec.resources.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.resources.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn spec.resources.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj spec.resources.patches.transforms.convert + +"Convert is used to cast the input into the given output type." + +### fn spec.resources.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn spec.resources.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj spec.resources.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn spec.resources.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn spec.resources.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn spec.resources.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." 
+ +### fn spec.resources.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj spec.resources.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.resources.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn spec.resources.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn spec.resources.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn spec.resources.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj spec.resources.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn spec.resources.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn spec.resources.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." + +### fn spec.resources.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn spec.resources.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj spec.resources.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn spec.resources.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON.\n`ToAdler32` generate a addler32 hash based on the input string." + +### fn spec.resources.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn spec.resources.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn spec.resources.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." 
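+
+A brief Jsonnet sketch of a string transform using the functions above (the import path and `composition` handle are assumptions; the type and format values are illustrative):
+
+```jsonnet
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';  // assumed path
+local transforms = crossplane.apiextensions.v1.composition.spec.resources.patches.transforms;  // assumed handle
+
+// Format the incoming value with a Go format string, e.g. 'prod' -> 'prod-db'.
+transforms.withType('string')
++ transforms.string.withType('Format')
++ transforms.string.withFmt('%s-db')
+```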
+ +## obj spec.resources.patches.transforms.string.join + +"Join defines parameters to join a slice of values to a string." + +### fn spec.resources.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator defines the character that should separate the values from each\nother in the joined string." + +## obj spec.resources.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn spec.resources.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn spec.resources.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj spec.resources.readinessChecks + +"ReadinessChecks allows users to define custom readiness checks. All checks\nhave to return true in order for resource to be considered ready. The\ndefault readiness check is to have the \"Ready\" condition to be \"True\"." + +### fn spec.resources.readinessChecks.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"FieldPath shows the path of the field whose value will be used." + +### fn spec.resources.readinessChecks.withMatchInteger + +```ts +withMatchInteger(matchInteger) +``` + +"MatchInt is the value you'd like to match if you're using \"MatchInt\" type." + +### fn spec.resources.readinessChecks.withMatchString + +```ts +withMatchString(matchString) +``` + +"MatchString is the value you'd like to match if you're using \"MatchString\" type." + +### fn spec.resources.readinessChecks.withType + +```ts +withType(type) +``` + +"Type indicates the type of probe you'd like to use." + +## obj spec.resources.readinessChecks.matchCondition + +"MatchCondition specifies the condition you'd like to match if you're using \"MatchCondition\" type." + +### fn spec.resources.readinessChecks.matchCondition.withType + +```ts +withType(type) +``` + +"Type indicates the type of condition you'd like to use." \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/v1/compositionRevision.md b/docs/crossplane/1.17/apiextensions/v1/compositionRevision.md new file mode 100644 index 0000000..909c55d --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/v1/compositionRevision.md @@ -0,0 +1,2159 @@ +--- +permalink: /crossplane/1.17/apiextensions/v1/compositionRevision/ +--- + +# apiextensions.v1.compositionRevision + +"A CompositionRevision represents a revision of a Composition. Crossplane\ncreates new revisions when there are changes to the Composition.\n\n\nCrossplane creates and manages CompositionRevisions. Don't directly edit\nCompositionRevisions." 
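+
+All functions on this page follow the same builder pattern: each `with*` call returns a partial object that is merged with `+`. A minimal Jsonnet sketch (the import path and the `compositionRevision` handle are assumptions, field values are illustrative, and Crossplane normally creates and manages these objects itself):
+
+```jsonnet
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';  // assumed path
+local rev = crossplane.apiextensions.v1.compositionRevision;  // assumed handle
+
+// new(name) sets apiVersion, kind and metadata.name; with* calls fill in the spec.
+rev.new('example-xdatabase-rev-1')
++ rev.spec.compositeTypeRef.withApiVersion('example.org/v1alpha1')
++ rev.spec.compositeTypeRef.withKind('XDatabase')
++ rev.spec.withMode('Pipeline')
++ rev.spec.withRevision(1)
+```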
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withMode(mode)`](#fn-specwithmode) + * [`fn withPatchSets(patchSets)`](#fn-specwithpatchsets) + * [`fn withPatchSetsMixin(patchSets)`](#fn-specwithpatchsetsmixin) + * [`fn withPipeline(pipeline)`](#fn-specwithpipeline) + * [`fn withPipelineMixin(pipeline)`](#fn-specwithpipelinemixin) + * [`fn withResources(resources)`](#fn-specwithresources) + * [`fn withResourcesMixin(resources)`](#fn-specwithresourcesmixin) + * [`fn withRevision(revision)`](#fn-specwithrevision) + * [`fn withWriteConnectionSecretsToNamespace(writeConnectionSecretsToNamespace)`](#fn-specwithwriteconnectionsecretstonamespace) + * [`obj spec.compositeTypeRef`](#obj-speccompositetyperef) + * [`fn withApiVersion(apiVersion)`](#fn-speccompositetyperefwithapiversion) + * [`fn withKind(kind)`](#fn-speccompositetyperefwithkind) + * [`obj spec.environment`](#obj-specenvironment) + * [`fn withDefaultData(defaultData)`](#fn-specenvironmentwithdefaultdata) + * [`fn withDefaultDataMixin(defaultData)`](#fn-specenvironmentwithdefaultdatamixin) + * [`fn withEnvironmentConfigs(environmentConfigs)`](#fn-specenvironmentwithenvironmentconfigs) + * [`fn withEnvironmentConfigsMixin(environmentConfigs)`](#fn-specenvironmentwithenvironmentconfigsmixin) + * [`fn withPatches(patches)`](#fn-specenvironmentwithpatches) + * [`fn withPatchesMixin(patches)`](#fn-specenvironmentwithpatchesmixin) + * [`obj spec.environment.environmentConfigs`](#obj-specenvironmentenvironmentconfigs) + * [`fn withType(type)`](#fn-specenvironmentenvironmentconfigswithtype) + * [`obj spec.environment.environmentConfigs.ref`](#obj-specenvironmentenvironmentconfigsref) + * [`fn withName(name)`](#fn-specenvironmentenvironmentconfigsrefwithname) + * [`obj spec.environment.environmentConfigs.selector`](#obj-specenvironmentenvironmentconfigsselector) + * [`fn withMatchLabels(matchLabels)`](#fn-specenvironmentenvironmentconfigsselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specenvironmentenvironmentconfigsselectorwithmatchlabelsmixin) + * [`fn 
withMaxMatch(maxMatch)`](#fn-specenvironmentenvironmentconfigsselectorwithmaxmatch) + * [`fn withMinMatch(minMatch)`](#fn-specenvironmentenvironmentconfigsselectorwithminmatch) + * [`fn withMode(mode)`](#fn-specenvironmentenvironmentconfigsselectorwithmode) + * [`fn withSortByFieldPath(sortByFieldPath)`](#fn-specenvironmentenvironmentconfigsselectorwithsortbyfieldpath) + * [`obj spec.environment.environmentConfigs.selector.matchLabels`](#obj-specenvironmentenvironmentconfigsselectormatchlabels) + * [`fn withFromFieldPathPolicy(fromFieldPathPolicy)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithfromfieldpathpolicy) + * [`fn withKey(key)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithkey) + * [`fn withType(type)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithtype) + * [`fn withValue(value)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithvalue) + * [`fn withValueFromFieldPath(valueFromFieldPath)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithvaluefromfieldpath) + * [`obj spec.environment.patches`](#obj-specenvironmentpatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specenvironmentpatcheswithfromfieldpath) + * [`fn withToFieldPath(toFieldPath)`](#fn-specenvironmentpatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-specenvironmentpatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-specenvironmentpatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-specenvironmentpatcheswithtype) + * [`obj spec.environment.patches.combine`](#obj-specenvironmentpatchescombine) + * [`fn withStrategy(strategy)`](#fn-specenvironmentpatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-specenvironmentpatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-specenvironmentpatchescombinewithvariablesmixin) + * [`obj spec.environment.patches.combine.string`](#obj-specenvironmentpatchescombinestring) + * [`fn withFmt(fmt)`](#fn-specenvironmentpatchescombinestringwithfmt) + * [`obj spec.environment.patches.combine.variables`](#obj-specenvironmentpatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specenvironmentpatchescombinevariableswithfromfieldpath) + * [`obj spec.environment.patches.policy`](#obj-specenvironmentpatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specenvironmentpatchespolicywithfromfieldpath) + * [`obj spec.environment.patches.policy.mergeOptions`](#obj-specenvironmentpatchespolicymergeoptions) + * [`fn withAppendSlice(appendSlice)`](#fn-specenvironmentpatchespolicymergeoptionswithappendslice) + * [`fn withKeepMapValues(keepMapValues)`](#fn-specenvironmentpatchespolicymergeoptionswithkeepmapvalues) + * [`obj spec.environment.patches.transforms`](#obj-specenvironmentpatchestransforms) + * [`fn withMap(map)`](#fn-specenvironmentpatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-specenvironmentpatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformswithtype) + * [`obj spec.environment.patches.transforms.convert`](#obj-specenvironmentpatchestransformsconvert) + * [`fn withFormat(format)`](#fn-specenvironmentpatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-specenvironmentpatchestransformsconvertwithtotype) + * [`obj spec.environment.patches.transforms.match`](#obj-specenvironmentpatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-specenvironmentpatchestransformsmatchwithfallbackto) + * [`fn 
withFallbackValue(fallbackValue)`](#fn-specenvironmentpatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-specenvironmentpatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-specenvironmentpatchestransformsmatchwithpatternsmixin) + * [`obj spec.environment.patches.transforms.match.patterns`](#obj-specenvironmentpatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-specenvironmentpatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-specenvironmentpatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-specenvironmentpatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformsmatchpatternswithtype) + * [`obj spec.environment.patches.transforms.math`](#obj-specenvironmentpatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-specenvironmentpatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-specenvironmentpatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-specenvironmentpatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformsmathwithtype) + * [`obj spec.environment.patches.transforms.string`](#obj-specenvironmentpatchestransformsstring) + * [`fn withConvert(convert)`](#fn-specenvironmentpatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-specenvironmentpatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-specenvironmentpatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformsstringwithtype) + * [`obj spec.environment.patches.transforms.string.join`](#obj-specenvironmentpatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-specenvironmentpatchestransformsstringjoinwithseparator) + * [`obj spec.environment.patches.transforms.string.regexp`](#obj-specenvironmentpatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-specenvironmentpatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-specenvironmentpatchestransformsstringregexpwithmatch) + * [`obj spec.environment.policy`](#obj-specenvironmentpolicy) + * [`fn withResolution(resolution)`](#fn-specenvironmentpolicywithresolution) + * [`fn withResolve(resolve)`](#fn-specenvironmentpolicywithresolve) + * [`obj spec.patchSets`](#obj-specpatchsets) + * [`fn withName(name)`](#fn-specpatchsetswithname) + * [`fn withPatches(patches)`](#fn-specpatchsetswithpatches) + * [`fn withPatchesMixin(patches)`](#fn-specpatchsetswithpatchesmixin) + * [`obj spec.patchSets.patches`](#obj-specpatchsetspatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specpatchsetspatcheswithfromfieldpath) + * [`fn withPatchSetName(patchSetName)`](#fn-specpatchsetspatcheswithpatchsetname) + * [`fn withToFieldPath(toFieldPath)`](#fn-specpatchsetspatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-specpatchsetspatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-specpatchsetspatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-specpatchsetspatcheswithtype) + * [`obj spec.patchSets.patches.combine`](#obj-specpatchsetspatchescombine) + * [`fn withStrategy(strategy)`](#fn-specpatchsetspatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-specpatchsetspatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-specpatchsetspatchescombinewithvariablesmixin) + * [`obj spec.patchSets.patches.combine.string`](#obj-specpatchsetspatchescombinestring) + * [`fn 
withFmt(fmt)`](#fn-specpatchsetspatchescombinestringwithfmt) + * [`obj spec.patchSets.patches.combine.variables`](#obj-specpatchsetspatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specpatchsetspatchescombinevariableswithfromfieldpath) + * [`obj spec.patchSets.patches.policy`](#obj-specpatchsetspatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specpatchsetspatchespolicywithfromfieldpath) + * [`obj spec.patchSets.patches.policy.mergeOptions`](#obj-specpatchsetspatchespolicymergeoptions) + * [`fn withAppendSlice(appendSlice)`](#fn-specpatchsetspatchespolicymergeoptionswithappendslice) + * [`fn withKeepMapValues(keepMapValues)`](#fn-specpatchsetspatchespolicymergeoptionswithkeepmapvalues) + * [`obj spec.patchSets.patches.transforms`](#obj-specpatchsetspatchestransforms) + * [`fn withMap(map)`](#fn-specpatchsetspatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-specpatchsetspatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformswithtype) + * [`obj spec.patchSets.patches.transforms.convert`](#obj-specpatchsetspatchestransformsconvert) + * [`fn withFormat(format)`](#fn-specpatchsetspatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-specpatchsetspatchestransformsconvertwithtotype) + * [`obj spec.patchSets.patches.transforms.match`](#obj-specpatchsetspatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-specpatchsetspatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-specpatchsetspatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-specpatchsetspatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-specpatchsetspatchestransformsmatchwithpatternsmixin) + * [`obj spec.patchSets.patches.transforms.match.patterns`](#obj-specpatchsetspatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-specpatchsetspatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-specpatchsetspatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-specpatchsetspatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformsmatchpatternswithtype) + * [`obj spec.patchSets.patches.transforms.math`](#obj-specpatchsetspatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-specpatchsetspatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-specpatchsetspatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-specpatchsetspatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformsmathwithtype) + * [`obj spec.patchSets.patches.transforms.string`](#obj-specpatchsetspatchestransformsstring) + * [`fn withConvert(convert)`](#fn-specpatchsetspatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-specpatchsetspatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-specpatchsetspatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformsstringwithtype) + * [`obj spec.patchSets.patches.transforms.string.join`](#obj-specpatchsetspatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-specpatchsetspatchestransformsstringjoinwithseparator) + * [`obj spec.patchSets.patches.transforms.string.regexp`](#obj-specpatchsetspatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-specpatchsetspatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-specpatchsetspatchestransformsstringregexpwithmatch) + * [`obj 
spec.pipeline`](#obj-specpipeline) + * [`fn withCredentials(credentials)`](#fn-specpipelinewithcredentials) + * [`fn withCredentialsMixin(credentials)`](#fn-specpipelinewithcredentialsmixin) + * [`fn withInput(input)`](#fn-specpipelinewithinput) + * [`fn withInputMixin(input)`](#fn-specpipelinewithinputmixin) + * [`fn withStep(step)`](#fn-specpipelinewithstep) + * [`obj spec.pipeline.credentials`](#obj-specpipelinecredentials) + * [`fn withName(name)`](#fn-specpipelinecredentialswithname) + * [`fn withSource(source)`](#fn-specpipelinecredentialswithsource) + * [`obj spec.pipeline.credentials.secretRef`](#obj-specpipelinecredentialssecretref) + * [`fn withName(name)`](#fn-specpipelinecredentialssecretrefwithname) + * [`fn withNamespace(namespace)`](#fn-specpipelinecredentialssecretrefwithnamespace) + * [`obj spec.pipeline.functionRef`](#obj-specpipelinefunctionref) + * [`fn withName(name)`](#fn-specpipelinefunctionrefwithname) + * [`obj spec.publishConnectionDetailsWithStoreConfigRef`](#obj-specpublishconnectiondetailswithstoreconfigref) + * [`fn withName(name)`](#fn-specpublishconnectiondetailswithstoreconfigrefwithname) + * [`obj spec.resources`](#obj-specresources) + * [`fn withBase(base)`](#fn-specresourceswithbase) + * [`fn withBaseMixin(base)`](#fn-specresourceswithbasemixin) + * [`fn withConnectionDetails(connectionDetails)`](#fn-specresourceswithconnectiondetails) + * [`fn withConnectionDetailsMixin(connectionDetails)`](#fn-specresourceswithconnectiondetailsmixin) + * [`fn withName(name)`](#fn-specresourceswithname) + * [`fn withPatches(patches)`](#fn-specresourceswithpatches) + * [`fn withPatchesMixin(patches)`](#fn-specresourceswithpatchesmixin) + * [`fn withReadinessChecks(readinessChecks)`](#fn-specresourceswithreadinesschecks) + * [`fn withReadinessChecksMixin(readinessChecks)`](#fn-specresourceswithreadinesschecksmixin) + * [`obj spec.resources.connectionDetails`](#obj-specresourcesconnectiondetails) + * [`fn withFromConnectionSecretKey(fromConnectionSecretKey)`](#fn-specresourcesconnectiondetailswithfromconnectionsecretkey) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcesconnectiondetailswithfromfieldpath) + * [`fn withName(name)`](#fn-specresourcesconnectiondetailswithname) + * [`fn withType(type)`](#fn-specresourcesconnectiondetailswithtype) + * [`fn withValue(value)`](#fn-specresourcesconnectiondetailswithvalue) + * [`obj spec.resources.patches`](#obj-specresourcespatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcespatcheswithfromfieldpath) + * [`fn withPatchSetName(patchSetName)`](#fn-specresourcespatcheswithpatchsetname) + * [`fn withToFieldPath(toFieldPath)`](#fn-specresourcespatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-specresourcespatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-specresourcespatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-specresourcespatcheswithtype) + * [`obj spec.resources.patches.combine`](#obj-specresourcespatchescombine) + * [`fn withStrategy(strategy)`](#fn-specresourcespatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-specresourcespatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-specresourcespatchescombinewithvariablesmixin) + * [`obj spec.resources.patches.combine.string`](#obj-specresourcespatchescombinestring) + * [`fn withFmt(fmt)`](#fn-specresourcespatchescombinestringwithfmt) + * [`obj spec.resources.patches.combine.variables`](#obj-specresourcespatchescombinevariables) + * [`fn 
withFromFieldPath(fromFieldPath)`](#fn-specresourcespatchescombinevariableswithfromfieldpath) + * [`obj spec.resources.patches.policy`](#obj-specresourcespatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcespatchespolicywithfromfieldpath) + * [`obj spec.resources.patches.policy.mergeOptions`](#obj-specresourcespatchespolicymergeoptions) + * [`fn withAppendSlice(appendSlice)`](#fn-specresourcespatchespolicymergeoptionswithappendslice) + * [`fn withKeepMapValues(keepMapValues)`](#fn-specresourcespatchespolicymergeoptionswithkeepmapvalues) + * [`obj spec.resources.patches.transforms`](#obj-specresourcespatchestransforms) + * [`fn withMap(map)`](#fn-specresourcespatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-specresourcespatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-specresourcespatchestransformswithtype) + * [`obj spec.resources.patches.transforms.convert`](#obj-specresourcespatchestransformsconvert) + * [`fn withFormat(format)`](#fn-specresourcespatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-specresourcespatchestransformsconvertwithtotype) + * [`obj spec.resources.patches.transforms.match`](#obj-specresourcespatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-specresourcespatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-specresourcespatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-specresourcespatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-specresourcespatchestransformsmatchwithpatternsmixin) + * [`obj spec.resources.patches.transforms.match.patterns`](#obj-specresourcespatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-specresourcespatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-specresourcespatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-specresourcespatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-specresourcespatchestransformsmatchpatternswithtype) + * [`obj spec.resources.patches.transforms.math`](#obj-specresourcespatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-specresourcespatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-specresourcespatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-specresourcespatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-specresourcespatchestransformsmathwithtype) + * [`obj spec.resources.patches.transforms.string`](#obj-specresourcespatchestransformsstring) + * [`fn withConvert(convert)`](#fn-specresourcespatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-specresourcespatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-specresourcespatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-specresourcespatchestransformsstringwithtype) + * [`obj spec.resources.patches.transforms.string.join`](#obj-specresourcespatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-specresourcespatchestransformsstringjoinwithseparator) + * [`obj spec.resources.patches.transforms.string.regexp`](#obj-specresourcespatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-specresourcespatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-specresourcespatchestransformsstringregexpwithmatch) + * [`obj spec.resources.readinessChecks`](#obj-specresourcesreadinesschecks) + * [`fn withFieldPath(fieldPath)`](#fn-specresourcesreadinesscheckswithfieldpath) + * [`fn 
withMatchInteger(matchInteger)`](#fn-specresourcesreadinesscheckswithmatchinteger) + * [`fn withMatchString(matchString)`](#fn-specresourcesreadinesscheckswithmatchstring) + * [`fn withType(type)`](#fn-specresourcesreadinesscheckswithtype) + * [`obj spec.resources.readinessChecks.matchCondition`](#obj-specresourcesreadinesschecksmatchcondition) + * [`fn withType(type)`](#fn-specresourcesreadinesschecksmatchconditionwithtype) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of CompositionRevision + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"CompositionRevisionSpec specifies the desired state of the composition\nrevision." + +### fn spec.withMode + +```ts +withMode(mode) +``` + +"Mode controls what type or \"mode\" of Composition will be used.\n\n\n\"Pipeline\" indicates that a Composition specifies a pipeline of\nComposition Functions, each of which is responsible for producing\ncomposed resources that Crossplane should create or update.\n\n\n\"Resources\" indicates that a Composition uses what is commonly referred\nto as \"Patch & Transform\" or P&T composition. This mode of Composition\nuses an array of resources, each a template for a composed resource.\n\n\nAll Compositions should use Pipeline mode. 
Resources mode is deprecated.\nResources mode won't be removed in Crossplane 1.x, and will remain the\ndefault to avoid breaking legacy Compositions. However, it's no longer\naccepting new features, and only accepting security related bug fixes." + +### fn spec.withPatchSets + +```ts +withPatchSets(patchSets) +``` + +"PatchSets define a named set of patches that may be included by any\nresource in this Composition. PatchSets cannot themselves refer to other\nPatchSets.\n\n\nPatchSets are only used by the \"Resources\" mode of Composition. They\nare ignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.withPatchSetsMixin + +```ts +withPatchSetsMixin(patchSets) +``` + +"PatchSets define a named set of patches that may be included by any\nresource in this Composition. PatchSets cannot themselves refer to other\nPatchSets.\n\n\nPatchSets are only used by the \"Resources\" mode of Composition. They\nare ignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +**Note:** This function appends passed data to existing values + +### fn spec.withPipeline + +```ts +withPipeline(pipeline) +``` + +"Pipeline is a list of composition function steps that will be used when a\ncomposite resource referring to this composition is created. One of\nresources and pipeline must be specified - you cannot specify both.\n\n\nThe Pipeline is only used by the \"Pipeline\" mode of Composition. It is\nignored by other modes." + +### fn spec.withPipelineMixin + +```ts +withPipelineMixin(pipeline) +``` + +"Pipeline is a list of composition function steps that will be used when a\ncomposite resource referring to this composition is created. One of\nresources and pipeline must be specified - you cannot specify both.\n\n\nThe Pipeline is only used by the \"Pipeline\" mode of Composition. It is\nignored by other modes." + +**Note:** This function appends passed data to existing values + +### fn spec.withResources + +```ts +withResources(resources) +``` + +"Resources is a list of resource templates that will be used when a\ncomposite resource referring to this composition is created.\n\n\nResources are only used by the \"Resources\" mode of Composition. They are\nignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.withResourcesMixin + +```ts +withResourcesMixin(resources) +``` + +"Resources is a list of resource templates that will be used when a\ncomposite resource referring to this composition is created.\n\n\nResources are only used by the \"Resources\" mode of Composition. They are\nignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevision + +```ts +withRevision(revision) +``` + +"Revision number. Newer revisions have larger numbers." + +### fn spec.withWriteConnectionSecretsToNamespace + +```ts +withWriteConnectionSecretsToNamespace(writeConnectionSecretsToNamespace) +``` + +"WriteConnectionSecretsToNamespace specifies the namespace in which the\nconnection secrets of composite resource dynamically provisioned using\nthis composition will be created.\nThis field is planned to be replaced in a future release in favor of\nPublishConnectionDetailsWithStoreConfigRef. Currently, both could be\nset independently and connection details would be published to both\nwithout affecting each other as long as related fields at MR level\nspecified." 
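+
+A small Jsonnet sketch of a Pipeline-mode spec using the functions above together with the `spec.pipeline` builders listed in the index (the import path and handle are assumptions; the step and function names are illustrative):
+
+```jsonnet
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';  // assumed path
+local spec = crossplane.apiextensions.v1.compositionRevision.spec;  // assumed handle
+
+// One pipeline step that runs a referenced composition function.
+spec.withMode('Pipeline')
++ spec.withRevision(2)
++ spec.withPipeline([
+  spec.pipeline.withStep('patch-and-transform')
+  + spec.pipeline.functionRef.withName('function-patch-and-transform'),
+])
+```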
+ +## obj spec.compositeTypeRef + +"CompositeTypeRef specifies the type of composite resource that this\ncomposition is compatible with." + +### fn spec.compositeTypeRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"APIVersion of the type." + +### fn spec.compositeTypeRef.withKind + +```ts +withKind(kind) +``` + +"Kind of the type." + +## obj spec.environment + +"Environment configures the environment in which resources are rendered.\n\n\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\nunless the relevant Crossplane feature flag is enabled, and may be\nchanged or removed without notice." + +### fn spec.environment.withDefaultData + +```ts +withDefaultData(defaultData) +``` + +"DefaultData statically defines the initial state of the environment.\nIt has the same schema-less structure as the data field in\nenvironment configs.\nIt is overwritten by the selected environment configs." + +### fn spec.environment.withDefaultDataMixin + +```ts +withDefaultDataMixin(defaultData) +``` + +"DefaultData statically defines the initial state of the environment.\nIt has the same schema-less structure as the data field in\nenvironment configs.\nIt is overwritten by the selected environment configs." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.withEnvironmentConfigs + +```ts +withEnvironmentConfigs(environmentConfigs) +``` + +"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\nresources are stored in the composite resource at\n`spec.environmentConfigRefs` and is only updated if it is null.\n\n\nThe list of references is used to compute an in-memory environment at\ncompose time. The data of all object is merged in the order they are\nlisted, meaning the values of EnvironmentConfigs with a larger index take\npriority over ones with smaller indices.\n\n\nThe computed environment can be accessed in a composition using\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches." + +### fn spec.environment.withEnvironmentConfigsMixin + +```ts +withEnvironmentConfigsMixin(environmentConfigs) +``` + +"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\nresources are stored in the composite resource at\n`spec.environmentConfigRefs` and is only updated if it is null.\n\n\nThe list of references is used to compute an in-memory environment at\ncompose time. The data of all object is merged in the order they are\nlisted, meaning the values of EnvironmentConfigs with a larger index take\npriority over ones with smaller indices.\n\n\nThe computed environment can be accessed in a composition using\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.withPatches + +```ts +withPatches(patches) +``` + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed." + +### fn spec.environment.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed." + +**Note:** This function appends passed data to existing values + +## obj spec.environment.environmentConfigs + +"EnvironmentConfigs selects a list of `EnvironmentConfig`s. 
The resolved\nresources are stored in the composite resource at\n`spec.environmentConfigRefs` and is only updated if it is null.\n\n\nThe list of references is used to compute an in-memory environment at\ncompose time. The data of all object is merged in the order they are\nlisted, meaning the values of EnvironmentConfigs with a larger index take\npriority over ones with smaller indices.\n\n\nThe computed environment can be accessed in a composition using\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches." + +### fn spec.environment.environmentConfigs.withType + +```ts +withType(type) +``` + +"Type specifies the way the EnvironmentConfig is selected.\nDefault is `Reference`" + +## obj spec.environment.environmentConfigs.ref + +"Ref is a named reference to a single EnvironmentConfig.\nEither Ref or Selector is required." + +### fn spec.environment.environmentConfigs.ref.withName + +```ts +withName(name) +``` + +"The name of the object." + +## obj spec.environment.environmentConfigs.selector + +"Selector selects EnvironmentConfig(s) via labels." + +### fn spec.environment.environmentConfigs.selector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +### fn spec.environment.environmentConfigs.selector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.environmentConfigs.selector.withMaxMatch + +```ts +withMaxMatch(maxMatch) +``` + +"MaxMatch specifies the number of extracted EnvironmentConfigs in Multiple mode, extracts all if nil." + +### fn spec.environment.environmentConfigs.selector.withMinMatch + +```ts +withMinMatch(minMatch) +``` + +"MinMatch specifies the required minimum of extracted EnvironmentConfigs in Multiple mode." + +### fn spec.environment.environmentConfigs.selector.withMode + +```ts +withMode(mode) +``` + +"Mode specifies retrieval strategy: \"Single\" or \"Multiple\"." + +### fn spec.environment.environmentConfigs.selector.withSortByFieldPath + +```ts +withSortByFieldPath(sortByFieldPath) +``` + +"SortByFieldPath is the path to the field based on which list of EnvironmentConfigs is alphabetically sorted." + +## obj spec.environment.environmentConfigs.selector.matchLabels + +"MatchLabels ensures an object with matching labels is selected." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withFromFieldPathPolicy + +```ts +withFromFieldPathPolicy(fromFieldPathPolicy) +``` + +"FromFieldPathPolicy specifies the policy for the valueFromFieldPath.\nThe default is Required, meaning that an error will be returned if the\nfield is not found in the composite resource.\nOptional means that if the field is not found in the composite resource,\nthat label pair will just be skipped. N.B. other specified label\nmatchers will still be used to retrieve the desired\nenvironment config, if any." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withKey + +```ts +withKey(key) +``` + +"Key of the label to match." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withType + +```ts +withType(type) +``` + +"Type specifies where the value for a label comes from." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withValue + +```ts +withValue(value) +``` + +"Value specifies a literal label value." 
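+
+As a hedged illustration of how these selector helpers might fit together (the import path, label key, and value are placeholders, and the whole `environment` block is an alpha feature behind a Crossplane feature flag), one label-based element for `spec.environment.withEnvironmentConfigs([...])` could be built like this:
+
+```jsonnet
+// Sketch: one selector-based element for spec.environment.withEnvironmentConfigs([...]).
+local composition = (import 'github.com/jsonnet-libs/crossplane-libsonnet/crossplane/1.17/main.libsonnet').apiextensions.v1.composition;  // assumed import path
+local envSrc = composition.spec.environment.environmentConfigs;
+
+envSrc.withType('Selector')
++ envSrc.selector.withMode('Single')
++ envSrc.selector.withMatchLabels([
+  // A literal key/value matcher; 'stage' and 'dev' are example values.
+  envSrc.selector.matchLabels.withKey('stage')
+  + envSrc.selector.matchLabels.withValue('dev'),
+])
+```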
+ +### fn spec.environment.environmentConfigs.selector.matchLabels.withValueFromFieldPath + +```ts +withValueFromFieldPath(valueFromFieldPath) +``` + +"ValueFromFieldPath specifies the field path to look for the label value." + +## obj spec.environment.patches + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed." + +### fn spec.environment.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath or\nToCompositeFieldPath." + +### fn spec.environment.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn spec.environment.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.environment.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the Patch object." + +## obj spec.environment.patches.combine + +"Combine is the patch configuration for a CombineFromComposite or\nCombineToComposite patch." + +### fn spec.environment.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn spec.environment.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.environment.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj spec.environment.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn spec.environment.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj spec.environment.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.environment.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj spec.environment.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn spec.environment.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\nthe specified path does not exist." + +## obj spec.environment.patches.policy.mergeOptions + +"MergeOptions Specifies merge options on a field path." + +### fn spec.environment.patches.policy.mergeOptions.withAppendSlice + +```ts +withAppendSlice(appendSlice) +``` + +"Specifies that already existing elements in a merged slice should be preserved" + +### fn spec.environment.patches.policy.mergeOptions.withKeepMapValues + +```ts +withKeepMapValues(keepMapValues) +``` + +"Specifies that already existing values in a merged map should be preserved" + +## obj spec.environment.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.environment.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn spec.environment.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj spec.environment.patches.transforms.convert + +"Convert is used to cast the input into the given output type." + +### fn spec.environment.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn spec.environment.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj spec.environment.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn spec.environment.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn spec.environment.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn spec.environment.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.environment.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj spec.environment.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." 
+ +### fn spec.environment.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn spec.environment.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn spec.environment.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn spec.environment.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj spec.environment.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn spec.environment.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn spec.environment.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." + +### fn spec.environment.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn spec.environment.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj spec.environment.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn spec.environment.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON.\n`ToAdler32` generate a addler32 hash based on the input string." + +### fn spec.environment.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn spec.environment.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn spec.environment.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj spec.environment.patches.transforms.string.join + +"Join defines parameters to join a slice of values to a string." + +### fn spec.environment.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator defines the character that should separate the values from each\nother in the joined string." + +## obj spec.environment.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." 
+ +### fn spec.environment.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn spec.environment.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj spec.environment.policy + +"Policy represents the Resolve and Resolution policies which apply to\nall EnvironmentSourceReferences in EnvironmentConfigs list." + +### fn spec.environment.policy.withResolution + +```ts +withResolution(resolution) +``` + +"Resolution specifies whether resolution of this reference is required.\nThe default is 'Required', which means the reconcile will fail if the\nreference cannot be resolved. 'Optional' means this reference will be\na no-op if it cannot be resolved." + +### fn spec.environment.policy.withResolve + +```ts +withResolve(resolve) +``` + +"Resolve specifies when this reference should be resolved. The default\nis 'IfNotPresent', which will attempt to resolve the reference only when\nthe corresponding field is not present. Use 'Always' to resolve the\nreference on every reconcile." + +## obj spec.patchSets + +"PatchSets define a named set of patches that may be included by any\nresource in this Composition. PatchSets cannot themselves refer to other\nPatchSets.\n\n\nPatchSets are only used by the \"Resources\" mode of Composition. They\nare ignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.patchSets.withName + +```ts +withName(name) +``` + +"Name of this PatchSet." + +### fn spec.patchSets.withPatches + +```ts +withPatches(patches) +``` + +"Patches will be applied as an overlay to the base resource." + +### fn spec.patchSets.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches will be applied as an overlay to the base resource." + +**Note:** This function appends passed data to existing values + +## obj spec.patchSets.patches + +"Patches will be applied as an overlay to the base resource." + +### fn spec.patchSets.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath,\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath." + +### fn spec.patchSets.patches.withPatchSetName + +```ts +withPatchSetName(patchSetName) +``` + +"PatchSetName to include patches from. Required when type is PatchSet." + +### fn spec.patchSets.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn spec.patchSets.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.patchSets.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn spec.patchSets.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. 
Each patch type may require\nits own fields to be set on the Patch object." + +## obj spec.patchSets.patches.combine + +"Combine is the patch configuration for a CombineFromComposite,\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch." + +### fn spec.patchSets.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn spec.patchSets.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.patchSets.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj spec.patchSets.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn spec.patchSets.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj spec.patchSets.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.patchSets.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj spec.patchSets.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn spec.patchSets.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. Use 'Required' if the patch should fail if\nthe specified path does not exist." + +## obj spec.patchSets.patches.policy.mergeOptions + +"MergeOptions Specifies merge options on a field path." + +### fn spec.patchSets.patches.policy.mergeOptions.withAppendSlice + +```ts +withAppendSlice(appendSlice) +``` + +"Specifies that already existing elements in a merged slice should be preserved" + +### fn spec.patchSets.patches.policy.mergeOptions.withKeepMapValues + +```ts +withKeepMapValues(keepMapValues) +``` + +"Specifies that already existing values in a merged map should be preserved" + +## obj spec.patchSets.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.patchSets.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn spec.patchSets.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn spec.patchSets.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj spec.patchSets.patches.transforms.convert + +"Convert is used to cast the input into the given output type." 
+ +### fn spec.patchSets.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn spec.patchSets.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj spec.patchSets.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn spec.patchSets.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn spec.patchSets.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn spec.patchSets.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.patchSets.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj spec.patchSets.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.patchSets.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn spec.patchSets.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn spec.patchSets.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn spec.patchSets.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj spec.patchSets.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn spec.patchSets.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn spec.patchSets.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." 
+ +### fn spec.patchSets.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn spec.patchSets.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj spec.patchSets.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn spec.patchSets.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON.\n`ToAdler32` generate a addler32 hash based on the input string." + +### fn spec.patchSets.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn spec.patchSets.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn spec.patchSets.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj spec.patchSets.patches.transforms.string.join + +"Join defines parameters to join a slice of values to a string." + +### fn spec.patchSets.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator defines the character that should separate the values from each\nother in the joined string." + +## obj spec.patchSets.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn spec.patchSets.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn spec.patchSets.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj spec.pipeline + +"Pipeline is a list of composition function steps that will be used when a\ncomposite resource referring to this composition is created. One of\nresources and pipeline must be specified - you cannot specify both.\n\n\nThe Pipeline is only used by the \"Pipeline\" mode of Composition. It is\nignored by other modes." + +### fn spec.pipeline.withCredentials + +```ts +withCredentials(credentials) +``` + +"Credentials are optional credentials that the Composition Function needs." + +### fn spec.pipeline.withCredentialsMixin + +```ts +withCredentialsMixin(credentials) +``` + +"Credentials are optional credentials that the Composition Function needs." + +**Note:** This function appends passed data to existing values + +### fn spec.pipeline.withInput + +```ts +withInput(input) +``` + +"Input is an optional, arbitrary Kubernetes resource (i.e. a resource\nwith an apiVersion and kind) that will be passed to the Composition\nFunction as the 'input' of its RunFunctionRequest." + +### fn spec.pipeline.withInputMixin + +```ts +withInputMixin(input) +``` + +"Input is an optional, arbitrary Kubernetes resource (i.e. 
a resource\nwith an apiVersion and kind) that will be passed to the Composition\nFunction as the 'input' of its RunFunctionRequest." + +**Note:** This function appends passed data to existing values + +### fn spec.pipeline.withStep + +```ts +withStep(step) +``` + +"Step name. Must be unique within its Pipeline." + +## obj spec.pipeline.credentials + +"Credentials are optional credentials that the Composition Function needs." + +### fn spec.pipeline.credentials.withName + +```ts +withName(name) +``` + +"Name of this set of credentials." + +### fn spec.pipeline.credentials.withSource + +```ts +withSource(source) +``` + +"Source of the function credentials." + +## obj spec.pipeline.credentials.secretRef + +"A SecretRef is a reference to a secret containing credentials that should\nbe supplied to the function." + +### fn spec.pipeline.credentials.secretRef.withName + +```ts +withName(name) +``` + +"Name of the secret." + +### fn spec.pipeline.credentials.secretRef.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace of the secret." + +## obj spec.pipeline.functionRef + +"FunctionRef is a reference to the Composition Function this step should\nexecute." + +### fn spec.pipeline.functionRef.withName + +```ts +withName(name) +``` + +"Name of the referenced Function." + +## obj spec.publishConnectionDetailsWithStoreConfigRef + +"PublishConnectionDetailsWithStoreConfig specifies the secret store config\nwith which the connection details of composite resources dynamically\nprovisioned using this composition will be published.\n\n\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\nunless the relevant Crossplane feature flag is enabled, and may be\nchanged or removed without notice." + +### fn spec.publishConnectionDetailsWithStoreConfigRef.withName + +```ts +withName(name) +``` + +"Name of the referenced StoreConfig." + +## obj spec.resources + +"Resources is a list of resource templates that will be used when a\ncomposite resource referring to this composition is created.\n\n\nResources are only used by the \"Resources\" mode of Composition. They are\nignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.resources.withBase + +```ts +withBase(base) +``` + +"Base is the target resource that the patches will be applied on." + +### fn spec.resources.withBaseMixin + +```ts +withBaseMixin(base) +``` + +"Base is the target resource that the patches will be applied on." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withConnectionDetails + +```ts +withConnectionDetails(connectionDetails) +``` + +"ConnectionDetails lists the propagation secret keys from this target\nresource to the composition instance connection secret." + +### fn spec.resources.withConnectionDetailsMixin + +```ts +withConnectionDetailsMixin(connectionDetails) +``` + +"ConnectionDetails lists the propagation secret keys from this target\nresource to the composition instance connection secret." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withName + +```ts +withName(name) +``` + +"A Name uniquely identifies this entry within its Composition's resources\narray. Names are optional but *strongly* recommended. When all entries in\nthe resources array are named entries may added, deleted, and reordered\nas long as their names do not change. When entries are not named the\nlength and order of the resources array should be treated as immutable.\nEither all or no entries must be named." 
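+
+As the descriptions above note, Resources mode is deprecated in favour of Composition Functions, but for existing Compositions a single entry for `spec.withResources([...])` could be sketched roughly as follows (the import path, managed resource kind, apiVersion, and field values are illustrative placeholders):
+
+```jsonnet
+// Sketch: one (deprecated) Resources-mode entry for spec.withResources([...]).
+local composition = (import 'github.com/jsonnet-libs/crossplane-libsonnet/crossplane/1.17/main.libsonnet').apiextensions.v1.composition;  // assumed import path
+local res = composition.spec.resources;
+
+res.withName('storage-bucket')
++ res.withBase({
+  // Base is the full target resource manifest that the patches are applied on.
+  apiVersion: 'storage.example.org/v1alpha1',
+  kind: 'Bucket',
+  spec: { forProvider: { location: 'EU' } },
+})
+```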
+ +### fn spec.resources.withPatches + +```ts +withPatches(patches) +``` + +"Patches will be applied as overlay to the base resource." + +### fn spec.resources.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches will be applied as overlay to the base resource." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withReadinessChecks + +```ts +withReadinessChecks(readinessChecks) +``` + +"ReadinessChecks allows users to define custom readiness checks. All checks\nhave to return true in order for resource to be considered ready. The\ndefault readiness check is to have the \"Ready\" condition to be \"True\"." + +### fn spec.resources.withReadinessChecksMixin + +```ts +withReadinessChecksMixin(readinessChecks) +``` + +"ReadinessChecks allows users to define custom readiness checks. All checks\nhave to return true in order for resource to be considered ready. The\ndefault readiness check is to have the \"Ready\" condition to be \"True\"." + +**Note:** This function appends passed data to existing values + +## obj spec.resources.connectionDetails + +"ConnectionDetails lists the propagation secret keys from this target\nresource to the composition instance connection secret." + +### fn spec.resources.connectionDetails.withFromConnectionSecretKey + +```ts +withFromConnectionSecretKey(fromConnectionSecretKey) +``` + +"FromConnectionSecretKey is the key that will be used to fetch the value\nfrom the composed resource's connection secret." + +### fn spec.resources.connectionDetails.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the composed resource whose\nvalue to be used as input. Name must be specified if the type is\nFromFieldPath." + +### fn spec.resources.connectionDetails.withName + +```ts +withName(name) +``` + +"Name of the connection secret key that will be propagated to the\nconnection secret of the composition instance. Leave empty if you'd like\nto use the same key name." + +### fn spec.resources.connectionDetails.withType + +```ts +withType(type) +``` + +"Type sets the connection detail fetching behaviour to be used. Each\nconnection detail type may require its own fields to be set on the\nConnectionDetail object. If the type is omitted Crossplane will attempt\nto infer it based on which other fields were specified. If multiple\nfields are specified the order of precedence is:\n1. FromValue\n2. FromConnectionSecretKey\n3. FromFieldPath" + +### fn spec.resources.connectionDetails.withValue + +```ts +withValue(value) +``` + +"Value that will be propagated to the connection secret of the composite\nresource. May be set to inject a fixed, non-sensitive connection secret\nvalue, for example a well-known port." + +## obj spec.resources.patches + +"Patches will be applied as overlay to the base resource." + +### fn spec.resources.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath,\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath." + +### fn spec.resources.patches.withPatchSetName + +```ts +withPatchSetName(patchSetName) +``` + +"PatchSetName to include patches from. Required when type is PatchSet." 
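+
+For example (a sketch; the import path and the `common-labels` PatchSet name are placeholders), a patch entry that pulls in a named PatchSet combines `withType` and `withPatchSetName`:
+
+```jsonnet
+// Sketch: reference a PatchSet defined under spec.patchSets from a resource entry.
+local composition = (import 'github.com/jsonnet-libs/crossplane-libsonnet/crossplane/1.17/main.libsonnet').apiextensions.v1.composition;  // assumed import path
+local patch = composition.spec.resources.patches;
+
+patch.withType('PatchSet')
++ patch.withPatchSetName('common-labels')
+```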
+ +### fn spec.resources.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn spec.resources.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.resources.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the Patch object." + +## obj spec.resources.patches.combine + +"Combine is the patch configuration for a CombineFromComposite,\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch." + +### fn spec.resources.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn spec.resources.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.resources.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj spec.resources.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn spec.resources.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj spec.resources.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.resources.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj spec.resources.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn spec.resources.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. Use 'Required' if the patch should fail if\nthe specified path does not exist." + +## obj spec.resources.patches.policy.mergeOptions + +"MergeOptions Specifies merge options on a field path." 
+ +### fn spec.resources.patches.policy.mergeOptions.withAppendSlice + +```ts +withAppendSlice(appendSlice) +``` + +"Specifies that already existing elements in a merged slice should be preserved" + +### fn spec.resources.patches.policy.mergeOptions.withKeepMapValues + +```ts +withKeepMapValues(keepMapValues) +``` + +"Specifies that already existing values in a merged map should be preserved" + +## obj spec.resources.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.resources.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn spec.resources.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj spec.resources.patches.transforms.convert + +"Convert is used to cast the input into the given output type." + +### fn spec.resources.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn spec.resources.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj spec.resources.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn spec.resources.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn spec.resources.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn spec.resources.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.resources.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj spec.resources.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.resources.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." 
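+
+Putting the match helpers together (a sketch; the import path and the size values are placeholders), a literal match transform with a fallback value could look like:
+
+```jsonnet
+// Sketch: a 'match' transform for a resource patch's transforms list.
+local composition = (import 'github.com/jsonnet-libs/crossplane-libsonnet/crossplane/1.17/main.libsonnet').apiextensions.v1.composition;  // assumed import path
+local transform = composition.spec.resources.patches.transforms;
+
+transform.withType('match')
++ transform.match.withPatterns([
+  // Literal patterns are the default pattern type.
+  transform.match.patterns.withLiteral('small')
+  + transform.match.patterns.withResult('db.t3.small'),
+])
++ transform.match.withFallbackValue('db.t3.medium')
+```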
+ +### fn spec.resources.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn spec.resources.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn spec.resources.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj spec.resources.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn spec.resources.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn spec.resources.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." + +### fn spec.resources.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn spec.resources.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj spec.resources.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn spec.resources.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON.\n`ToAdler32` generate a addler32 hash based on the input string." + +### fn spec.resources.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn spec.resources.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn spec.resources.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj spec.resources.patches.transforms.string.join + +"Join defines parameters to join a slice of values to a string." + +### fn spec.resources.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator defines the character that should separate the values from each\nother in the joined string." + +## obj spec.resources.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn spec.resources.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn spec.resources.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. 
May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj spec.resources.readinessChecks + +"ReadinessChecks allows users to define custom readiness checks. All checks\nhave to return true in order for resource to be considered ready. The\ndefault readiness check is to have the \"Ready\" condition to be \"True\"." + +### fn spec.resources.readinessChecks.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"FieldPath shows the path of the field whose value will be used." + +### fn spec.resources.readinessChecks.withMatchInteger + +```ts +withMatchInteger(matchInteger) +``` + +"MatchInt is the value you'd like to match if you're using \"MatchInt\" type." + +### fn spec.resources.readinessChecks.withMatchString + +```ts +withMatchString(matchString) +``` + +"MatchString is the value you'd like to match if you're using \"MatchString\" type." + +### fn spec.resources.readinessChecks.withType + +```ts +withType(type) +``` + +"Type indicates the type of probe you'd like to use." + +## obj spec.resources.readinessChecks.matchCondition + +"MatchCondition specifies the condition you'd like to match if you're using \"MatchCondition\" type." + +### fn spec.resources.readinessChecks.matchCondition.withType + +```ts +withType(type) +``` + +"Type indicates the type of condition you'd like to use." \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/v1/index.md b/docs/crossplane/1.17/apiextensions/v1/index.md new file mode 100644 index 0000000..4dd10ac --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/v1/index.md @@ -0,0 +1,11 @@ +--- +permalink: /crossplane/1.17/apiextensions/v1/ +--- + +# apiextensions.v1 + + + +* [compositeResourceDefinition](compositeResourceDefinition.md) +* [composition](composition.md) +* [compositionRevision](compositionRevision.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/v1alpha1/environmentConfig.md b/docs/crossplane/1.17/apiextensions/v1alpha1/environmentConfig.md new file mode 100644 index 0000000..4b279f1 --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/v1alpha1/environmentConfig.md @@ -0,0 +1,225 @@ +--- +permalink: /crossplane/1.17/apiextensions/v1alpha1/environmentConfig/ +--- + +# apiextensions.v1alpha1.environmentConfig + +"An EnvironmentConfig contains user-defined unstructured values for\nuse in a Composition.\n\n\nRead the Crossplane documentation for\n[more information about EnvironmentConfigs](https://docs.crossplane.io/latest/concepts/environment-configs)." 
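+
+A minimal sketch of building one with this library (the import path and the data values are illustrative assumptions) using the `new` and `withData` functions documented below:
+
+```jsonnet
+// Sketch: an EnvironmentConfig holding arbitrary, schema-less data.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/crossplane/1.17/main.libsonnet';
+local envConfig = crossplane.apiextensions.v1alpha1.environmentConfig;
+
+envConfig.new('example-environment')
++ envConfig.withData({
+  region: 'eu-west-1',
+  tier: 'dev',
+})
+```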
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`fn withData(data)`](#fn-withdata) +* [`fn withDataMixin(data)`](#fn-withdatamixin) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of EnvironmentConfig + +### fn withData + +```ts +withData(data) +``` + +"The data of this EnvironmentConfig.\nThis may contain any kind of structure that can be serialized into JSON." + +### fn withDataMixin + +```ts +withDataMixin(data) +``` + +"The data of this EnvironmentConfig.\nThis may contain any kind of structure that can be serialized into JSON." + +**Note:** This function appends passed data to existing values + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/v1alpha1/index.md b/docs/crossplane/1.17/apiextensions/v1alpha1/index.md new file mode 100644 index 0000000..39c8277 --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/v1alpha1/index.md @@ -0,0 +1,10 @@ +--- +permalink: /crossplane/1.17/apiextensions/v1alpha1/ +--- + +# apiextensions.v1alpha1 + + + +* [environmentConfig](environmentConfig.md) +* [usage](usage.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/v1alpha1/usage.md b/docs/crossplane/1.17/apiextensions/v1alpha1/usage.md new file mode 100644 index 0000000..6c49131 --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/v1alpha1/usage.md @@ -0,0 +1,370 @@ +--- +permalink: /crossplane/1.17/apiextensions/v1alpha1/usage/ +--- + +# apiextensions.v1alpha1.usage + +"A Usage defines a deletion blocking relationship between two resources.\n\n\nUsages prevent accidental deletion of a single resource or deletion of\nresources with dependent resources.\n\n\nRead the Crossplane documentation for\n[more information about Compositions](https://docs.crossplane.io/latest/concepts/usages)." 
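Before the field-by-field reference, here is a minimal sketch of how these builder functions are typically chained. The import path and the resource kinds and names are hypothetical, and the `+` mixin chaining assumes the usual convention of generated Jsonnet libraries:

```jsonnet
// Hypothetical import path; adjust to wherever this library is vendored.
local crossplane = import 'crossplane/1.17/main.libsonnet';
local usage = crossplane.apiextensions.v1alpha1.usage;

// Block deletion of the Cluster for as long as the Release still uses it.
usage.new('release-uses-cluster')
+ usage.spec.withReason('Release must be deleted before the Cluster')
+ usage.spec.of.withApiVersion('eks.upbound.io/v1beta1')
+ usage.spec.of.withKind('Cluster')
+ usage.spec.of.resourceRef.withName('my-cluster')
+ usage.spec.by.withApiVersion('helm.crossplane.io/v1beta1')
+ usage.spec.by.withKind('Release')
+ usage.spec.by.resourceRef.withName('my-prometheus-chart')
```

Each `with*` function returns a partial object, so the fields can be layered with `+` in any order.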
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withReason(reason)`](#fn-specwithreason) + * [`fn withReplayDeletion(replayDeletion)`](#fn-specwithreplaydeletion) + * [`obj spec.by`](#obj-specby) + * [`fn withApiVersion(apiVersion)`](#fn-specbywithapiversion) + * [`fn withKind(kind)`](#fn-specbywithkind) + * [`obj spec.by.resourceRef`](#obj-specbyresourceref) + * [`fn withName(name)`](#fn-specbyresourcerefwithname) + * [`obj spec.by.resourceSelector`](#obj-specbyresourceselector) + * [`fn withMatchControllerRef(matchControllerRef)`](#fn-specbyresourceselectorwithmatchcontrollerref) + * [`fn withMatchLabels(matchLabels)`](#fn-specbyresourceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specbyresourceselectorwithmatchlabelsmixin) + * [`obj spec.of`](#obj-specof) + * [`fn withApiVersion(apiVersion)`](#fn-specofwithapiversion) + * [`fn withKind(kind)`](#fn-specofwithkind) + * [`obj spec.of.resourceRef`](#obj-specofresourceref) + * [`fn withName(name)`](#fn-specofresourcerefwithname) + * [`obj spec.of.resourceSelector`](#obj-specofresourceselector) + * [`fn withMatchControllerRef(matchControllerRef)`](#fn-specofresourceselectorwithmatchcontrollerref) + * [`fn withMatchLabels(matchLabels)`](#fn-specofresourceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specofresourceselectorwithmatchlabelsmixin) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Usage + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"UsageSpec defines the desired state of Usage." + +### fn spec.withReason + +```ts +withReason(reason) +``` + +"Reason is the reason for blocking deletion of the resource." + +### fn spec.withReplayDeletion + +```ts +withReplayDeletion(replayDeletion) +``` + +"ReplayDeletion will trigger a deletion on the used resource during the deletion of the usage itself, if it was attempted to be deleted at least once." + +## obj spec.by + +"By is the resource that is \"using the other resource\"." + +### fn spec.by.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"API version of the referent." + +### fn spec.by.withKind + +```ts +withKind(kind) +``` + +"Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + +## obj spec.by.resourceRef + +"Reference to the resource." + +### fn spec.by.resourceRef.withName + +```ts +withName(name) +``` + +"Name of the referent." + +## obj spec.by.resourceSelector + +"Selector to the resource.\nThis field will be ignored if ResourceRef is set." + +### fn spec.by.resourceSelector.withMatchControllerRef + +```ts +withMatchControllerRef(matchControllerRef) +``` + +"MatchControllerRef ensures an object with the same controller reference\nas the selecting object is selected." + +### fn spec.by.resourceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +### fn spec.by.resourceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +**Note:** This function appends passed data to existing values + +## obj spec.of + +"Of is the resource that is \"being used\"." 
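Instead of pinning the used resource by name, it can also be matched by labels with the selector functions below. A brief sketch, reusing the hypothetical `usage` alias from the example above (the label key and value are illustrative):

```jsonnet
// Match the "used" resource by labels. Per the field documentation, the
// selector is ignored when resourceRef is set, so use one or the other.
usage.spec.of.withApiVersion('ec2.aws.upbound.io/v1beta1')
+ usage.spec.of.withKind('VPC')
+ usage.spec.of.resourceSelector.withMatchLabels({ 'example.org/environment': 'production' })
```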
+ +### fn spec.of.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"API version of the referent." + +### fn spec.of.withKind + +```ts +withKind(kind) +``` + +"Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + +## obj spec.of.resourceRef + +"Reference to the resource." + +### fn spec.of.resourceRef.withName + +```ts +withName(name) +``` + +"Name of the referent." + +## obj spec.of.resourceSelector + +"Selector to the resource.\nThis field will be ignored if ResourceRef is set." + +### fn spec.of.resourceSelector.withMatchControllerRef + +```ts +withMatchControllerRef(matchControllerRef) +``` + +"MatchControllerRef ensures an object with the same controller reference\nas the selecting object is selected." + +### fn spec.of.resourceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +### fn spec.of.resourceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +**Note:** This function appends passed data to existing values \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/v1beta1/compositionRevision.md b/docs/crossplane/1.17/apiextensions/v1beta1/compositionRevision.md new file mode 100644 index 0000000..f52c9b6 --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/v1beta1/compositionRevision.md @@ -0,0 +1,2159 @@ +--- +permalink: /crossplane/1.17/apiextensions/v1beta1/compositionRevision/ +--- + +# apiextensions.v1beta1.compositionRevision + +"A CompositionRevision represents a revision of a Composition. Crossplane\ncreates new revisions when there are changes to the Composition.\n\n\nCrossplane creates and manages CompositionRevisions. Don't directly edit\nCompositionRevisions." 
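Crossplane normally writes these objects itself, so hand-building one is mostly useful in tests or for comparing against what Crossplane rendered. A minimal sketch under that caveat; the import path, revision name, and composite type are hypothetical:

```jsonnet
// Hypothetical import path; adjust to wherever this library is vendored.
local crossplane = import 'crossplane/1.17/main.libsonnet';
local rev = crossplane.apiextensions.v1beta1.compositionRevision;

rev.new('xdatabases.example.org-abc123')
+ rev.spec.withRevision(1)
+ rev.spec.compositeTypeRef.withApiVersion('example.org/v1alpha1')
+ rev.spec.compositeTypeRef.withKind('XDatabase')
```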
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withMode(mode)`](#fn-specwithmode) + * [`fn withPatchSets(patchSets)`](#fn-specwithpatchsets) + * [`fn withPatchSetsMixin(patchSets)`](#fn-specwithpatchsetsmixin) + * [`fn withPipeline(pipeline)`](#fn-specwithpipeline) + * [`fn withPipelineMixin(pipeline)`](#fn-specwithpipelinemixin) + * [`fn withResources(resources)`](#fn-specwithresources) + * [`fn withResourcesMixin(resources)`](#fn-specwithresourcesmixin) + * [`fn withRevision(revision)`](#fn-specwithrevision) + * [`fn withWriteConnectionSecretsToNamespace(writeConnectionSecretsToNamespace)`](#fn-specwithwriteconnectionsecretstonamespace) + * [`obj spec.compositeTypeRef`](#obj-speccompositetyperef) + * [`fn withApiVersion(apiVersion)`](#fn-speccompositetyperefwithapiversion) + * [`fn withKind(kind)`](#fn-speccompositetyperefwithkind) + * [`obj spec.environment`](#obj-specenvironment) + * [`fn withDefaultData(defaultData)`](#fn-specenvironmentwithdefaultdata) + * [`fn withDefaultDataMixin(defaultData)`](#fn-specenvironmentwithdefaultdatamixin) + * [`fn withEnvironmentConfigs(environmentConfigs)`](#fn-specenvironmentwithenvironmentconfigs) + * [`fn withEnvironmentConfigsMixin(environmentConfigs)`](#fn-specenvironmentwithenvironmentconfigsmixin) + * [`fn withPatches(patches)`](#fn-specenvironmentwithpatches) + * [`fn withPatchesMixin(patches)`](#fn-specenvironmentwithpatchesmixin) + * [`obj spec.environment.environmentConfigs`](#obj-specenvironmentenvironmentconfigs) + * [`fn withType(type)`](#fn-specenvironmentenvironmentconfigswithtype) + * [`obj spec.environment.environmentConfigs.ref`](#obj-specenvironmentenvironmentconfigsref) + * [`fn withName(name)`](#fn-specenvironmentenvironmentconfigsrefwithname) + * [`obj spec.environment.environmentConfigs.selector`](#obj-specenvironmentenvironmentconfigsselector) + * [`fn withMatchLabels(matchLabels)`](#fn-specenvironmentenvironmentconfigsselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specenvironmentenvironmentconfigsselectorwithmatchlabelsmixin) + * [`fn 
withMaxMatch(maxMatch)`](#fn-specenvironmentenvironmentconfigsselectorwithmaxmatch) + * [`fn withMinMatch(minMatch)`](#fn-specenvironmentenvironmentconfigsselectorwithminmatch) + * [`fn withMode(mode)`](#fn-specenvironmentenvironmentconfigsselectorwithmode) + * [`fn withSortByFieldPath(sortByFieldPath)`](#fn-specenvironmentenvironmentconfigsselectorwithsortbyfieldpath) + * [`obj spec.environment.environmentConfigs.selector.matchLabels`](#obj-specenvironmentenvironmentconfigsselectormatchlabels) + * [`fn withFromFieldPathPolicy(fromFieldPathPolicy)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithfromfieldpathpolicy) + * [`fn withKey(key)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithkey) + * [`fn withType(type)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithtype) + * [`fn withValue(value)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithvalue) + * [`fn withValueFromFieldPath(valueFromFieldPath)`](#fn-specenvironmentenvironmentconfigsselectormatchlabelswithvaluefromfieldpath) + * [`obj spec.environment.patches`](#obj-specenvironmentpatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specenvironmentpatcheswithfromfieldpath) + * [`fn withToFieldPath(toFieldPath)`](#fn-specenvironmentpatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-specenvironmentpatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-specenvironmentpatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-specenvironmentpatcheswithtype) + * [`obj spec.environment.patches.combine`](#obj-specenvironmentpatchescombine) + * [`fn withStrategy(strategy)`](#fn-specenvironmentpatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-specenvironmentpatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-specenvironmentpatchescombinewithvariablesmixin) + * [`obj spec.environment.patches.combine.string`](#obj-specenvironmentpatchescombinestring) + * [`fn withFmt(fmt)`](#fn-specenvironmentpatchescombinestringwithfmt) + * [`obj spec.environment.patches.combine.variables`](#obj-specenvironmentpatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specenvironmentpatchescombinevariableswithfromfieldpath) + * [`obj spec.environment.patches.policy`](#obj-specenvironmentpatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specenvironmentpatchespolicywithfromfieldpath) + * [`obj spec.environment.patches.policy.mergeOptions`](#obj-specenvironmentpatchespolicymergeoptions) + * [`fn withAppendSlice(appendSlice)`](#fn-specenvironmentpatchespolicymergeoptionswithappendslice) + * [`fn withKeepMapValues(keepMapValues)`](#fn-specenvironmentpatchespolicymergeoptionswithkeepmapvalues) + * [`obj spec.environment.patches.transforms`](#obj-specenvironmentpatchestransforms) + * [`fn withMap(map)`](#fn-specenvironmentpatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-specenvironmentpatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformswithtype) + * [`obj spec.environment.patches.transforms.convert`](#obj-specenvironmentpatchestransformsconvert) + * [`fn withFormat(format)`](#fn-specenvironmentpatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-specenvironmentpatchestransformsconvertwithtotype) + * [`obj spec.environment.patches.transforms.match`](#obj-specenvironmentpatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-specenvironmentpatchestransformsmatchwithfallbackto) + * [`fn 
withFallbackValue(fallbackValue)`](#fn-specenvironmentpatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-specenvironmentpatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-specenvironmentpatchestransformsmatchwithpatternsmixin) + * [`obj spec.environment.patches.transforms.match.patterns`](#obj-specenvironmentpatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-specenvironmentpatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-specenvironmentpatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-specenvironmentpatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformsmatchpatternswithtype) + * [`obj spec.environment.patches.transforms.math`](#obj-specenvironmentpatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-specenvironmentpatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-specenvironmentpatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-specenvironmentpatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformsmathwithtype) + * [`obj spec.environment.patches.transforms.string`](#obj-specenvironmentpatchestransformsstring) + * [`fn withConvert(convert)`](#fn-specenvironmentpatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-specenvironmentpatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-specenvironmentpatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-specenvironmentpatchestransformsstringwithtype) + * [`obj spec.environment.patches.transforms.string.join`](#obj-specenvironmentpatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-specenvironmentpatchestransformsstringjoinwithseparator) + * [`obj spec.environment.patches.transforms.string.regexp`](#obj-specenvironmentpatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-specenvironmentpatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-specenvironmentpatchestransformsstringregexpwithmatch) + * [`obj spec.environment.policy`](#obj-specenvironmentpolicy) + * [`fn withResolution(resolution)`](#fn-specenvironmentpolicywithresolution) + * [`fn withResolve(resolve)`](#fn-specenvironmentpolicywithresolve) + * [`obj spec.patchSets`](#obj-specpatchsets) + * [`fn withName(name)`](#fn-specpatchsetswithname) + * [`fn withPatches(patches)`](#fn-specpatchsetswithpatches) + * [`fn withPatchesMixin(patches)`](#fn-specpatchsetswithpatchesmixin) + * [`obj spec.patchSets.patches`](#obj-specpatchsetspatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specpatchsetspatcheswithfromfieldpath) + * [`fn withPatchSetName(patchSetName)`](#fn-specpatchsetspatcheswithpatchsetname) + * [`fn withToFieldPath(toFieldPath)`](#fn-specpatchsetspatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-specpatchsetspatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-specpatchsetspatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-specpatchsetspatcheswithtype) + * [`obj spec.patchSets.patches.combine`](#obj-specpatchsetspatchescombine) + * [`fn withStrategy(strategy)`](#fn-specpatchsetspatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-specpatchsetspatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-specpatchsetspatchescombinewithvariablesmixin) + * [`obj spec.patchSets.patches.combine.string`](#obj-specpatchsetspatchescombinestring) + * [`fn 
withFmt(fmt)`](#fn-specpatchsetspatchescombinestringwithfmt) + * [`obj spec.patchSets.patches.combine.variables`](#obj-specpatchsetspatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specpatchsetspatchescombinevariableswithfromfieldpath) + * [`obj spec.patchSets.patches.policy`](#obj-specpatchsetspatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specpatchsetspatchespolicywithfromfieldpath) + * [`obj spec.patchSets.patches.policy.mergeOptions`](#obj-specpatchsetspatchespolicymergeoptions) + * [`fn withAppendSlice(appendSlice)`](#fn-specpatchsetspatchespolicymergeoptionswithappendslice) + * [`fn withKeepMapValues(keepMapValues)`](#fn-specpatchsetspatchespolicymergeoptionswithkeepmapvalues) + * [`obj spec.patchSets.patches.transforms`](#obj-specpatchsetspatchestransforms) + * [`fn withMap(map)`](#fn-specpatchsetspatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-specpatchsetspatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformswithtype) + * [`obj spec.patchSets.patches.transforms.convert`](#obj-specpatchsetspatchestransformsconvert) + * [`fn withFormat(format)`](#fn-specpatchsetspatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-specpatchsetspatchestransformsconvertwithtotype) + * [`obj spec.patchSets.patches.transforms.match`](#obj-specpatchsetspatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-specpatchsetspatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-specpatchsetspatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-specpatchsetspatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-specpatchsetspatchestransformsmatchwithpatternsmixin) + * [`obj spec.patchSets.patches.transforms.match.patterns`](#obj-specpatchsetspatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-specpatchsetspatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-specpatchsetspatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-specpatchsetspatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformsmatchpatternswithtype) + * [`obj spec.patchSets.patches.transforms.math`](#obj-specpatchsetspatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-specpatchsetspatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-specpatchsetspatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-specpatchsetspatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformsmathwithtype) + * [`obj spec.patchSets.patches.transforms.string`](#obj-specpatchsetspatchestransformsstring) + * [`fn withConvert(convert)`](#fn-specpatchsetspatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-specpatchsetspatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-specpatchsetspatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-specpatchsetspatchestransformsstringwithtype) + * [`obj spec.patchSets.patches.transforms.string.join`](#obj-specpatchsetspatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-specpatchsetspatchestransformsstringjoinwithseparator) + * [`obj spec.patchSets.patches.transforms.string.regexp`](#obj-specpatchsetspatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-specpatchsetspatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-specpatchsetspatchestransformsstringregexpwithmatch) + * [`obj 
spec.pipeline`](#obj-specpipeline) + * [`fn withCredentials(credentials)`](#fn-specpipelinewithcredentials) + * [`fn withCredentialsMixin(credentials)`](#fn-specpipelinewithcredentialsmixin) + * [`fn withInput(input)`](#fn-specpipelinewithinput) + * [`fn withInputMixin(input)`](#fn-specpipelinewithinputmixin) + * [`fn withStep(step)`](#fn-specpipelinewithstep) + * [`obj spec.pipeline.credentials`](#obj-specpipelinecredentials) + * [`fn withName(name)`](#fn-specpipelinecredentialswithname) + * [`fn withSource(source)`](#fn-specpipelinecredentialswithsource) + * [`obj spec.pipeline.credentials.secretRef`](#obj-specpipelinecredentialssecretref) + * [`fn withName(name)`](#fn-specpipelinecredentialssecretrefwithname) + * [`fn withNamespace(namespace)`](#fn-specpipelinecredentialssecretrefwithnamespace) + * [`obj spec.pipeline.functionRef`](#obj-specpipelinefunctionref) + * [`fn withName(name)`](#fn-specpipelinefunctionrefwithname) + * [`obj spec.publishConnectionDetailsWithStoreConfigRef`](#obj-specpublishconnectiondetailswithstoreconfigref) + * [`fn withName(name)`](#fn-specpublishconnectiondetailswithstoreconfigrefwithname) + * [`obj spec.resources`](#obj-specresources) + * [`fn withBase(base)`](#fn-specresourceswithbase) + * [`fn withBaseMixin(base)`](#fn-specresourceswithbasemixin) + * [`fn withConnectionDetails(connectionDetails)`](#fn-specresourceswithconnectiondetails) + * [`fn withConnectionDetailsMixin(connectionDetails)`](#fn-specresourceswithconnectiondetailsmixin) + * [`fn withName(name)`](#fn-specresourceswithname) + * [`fn withPatches(patches)`](#fn-specresourceswithpatches) + * [`fn withPatchesMixin(patches)`](#fn-specresourceswithpatchesmixin) + * [`fn withReadinessChecks(readinessChecks)`](#fn-specresourceswithreadinesschecks) + * [`fn withReadinessChecksMixin(readinessChecks)`](#fn-specresourceswithreadinesschecksmixin) + * [`obj spec.resources.connectionDetails`](#obj-specresourcesconnectiondetails) + * [`fn withFromConnectionSecretKey(fromConnectionSecretKey)`](#fn-specresourcesconnectiondetailswithfromconnectionsecretkey) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcesconnectiondetailswithfromfieldpath) + * [`fn withName(name)`](#fn-specresourcesconnectiondetailswithname) + * [`fn withType(type)`](#fn-specresourcesconnectiondetailswithtype) + * [`fn withValue(value)`](#fn-specresourcesconnectiondetailswithvalue) + * [`obj spec.resources.patches`](#obj-specresourcespatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcespatcheswithfromfieldpath) + * [`fn withPatchSetName(patchSetName)`](#fn-specresourcespatcheswithpatchsetname) + * [`fn withToFieldPath(toFieldPath)`](#fn-specresourcespatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-specresourcespatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-specresourcespatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-specresourcespatcheswithtype) + * [`obj spec.resources.patches.combine`](#obj-specresourcespatchescombine) + * [`fn withStrategy(strategy)`](#fn-specresourcespatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-specresourcespatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-specresourcespatchescombinewithvariablesmixin) + * [`obj spec.resources.patches.combine.string`](#obj-specresourcespatchescombinestring) + * [`fn withFmt(fmt)`](#fn-specresourcespatchescombinestringwithfmt) + * [`obj spec.resources.patches.combine.variables`](#obj-specresourcespatchescombinevariables) + * [`fn 
withFromFieldPath(fromFieldPath)`](#fn-specresourcespatchescombinevariableswithfromfieldpath) + * [`obj spec.resources.patches.policy`](#obj-specresourcespatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-specresourcespatchespolicywithfromfieldpath) + * [`obj spec.resources.patches.policy.mergeOptions`](#obj-specresourcespatchespolicymergeoptions) + * [`fn withAppendSlice(appendSlice)`](#fn-specresourcespatchespolicymergeoptionswithappendslice) + * [`fn withKeepMapValues(keepMapValues)`](#fn-specresourcespatchespolicymergeoptionswithkeepmapvalues) + * [`obj spec.resources.patches.transforms`](#obj-specresourcespatchestransforms) + * [`fn withMap(map)`](#fn-specresourcespatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-specresourcespatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-specresourcespatchestransformswithtype) + * [`obj spec.resources.patches.transforms.convert`](#obj-specresourcespatchestransformsconvert) + * [`fn withFormat(format)`](#fn-specresourcespatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-specresourcespatchestransformsconvertwithtotype) + * [`obj spec.resources.patches.transforms.match`](#obj-specresourcespatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-specresourcespatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-specresourcespatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-specresourcespatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-specresourcespatchestransformsmatchwithpatternsmixin) + * [`obj spec.resources.patches.transforms.match.patterns`](#obj-specresourcespatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-specresourcespatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-specresourcespatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-specresourcespatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-specresourcespatchestransformsmatchpatternswithtype) + * [`obj spec.resources.patches.transforms.math`](#obj-specresourcespatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-specresourcespatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-specresourcespatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-specresourcespatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-specresourcespatchestransformsmathwithtype) + * [`obj spec.resources.patches.transforms.string`](#obj-specresourcespatchestransformsstring) + * [`fn withConvert(convert)`](#fn-specresourcespatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-specresourcespatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-specresourcespatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-specresourcespatchestransformsstringwithtype) + * [`obj spec.resources.patches.transforms.string.join`](#obj-specresourcespatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-specresourcespatchestransformsstringjoinwithseparator) + * [`obj spec.resources.patches.transforms.string.regexp`](#obj-specresourcespatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-specresourcespatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-specresourcespatchestransformsstringregexpwithmatch) + * [`obj spec.resources.readinessChecks`](#obj-specresourcesreadinesschecks) + * [`fn withFieldPath(fieldPath)`](#fn-specresourcesreadinesscheckswithfieldpath) + * [`fn 
withMatchInteger(matchInteger)`](#fn-specresourcesreadinesscheckswithmatchinteger) + * [`fn withMatchString(matchString)`](#fn-specresourcesreadinesscheckswithmatchstring) + * [`fn withType(type)`](#fn-specresourcesreadinesscheckswithtype) + * [`obj spec.resources.readinessChecks.matchCondition`](#obj-specresourcesreadinesschecksmatchcondition) + * [`fn withType(type)`](#fn-specresourcesreadinesschecksmatchconditionwithtype) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of CompositionRevision + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"CompositionRevisionSpec specifies the desired state of the composition\nrevision." + +### fn spec.withMode + +```ts +withMode(mode) +``` + +"Mode controls what type or \"mode\" of Composition will be used.\n\n\n\"Pipeline\" indicates that a Composition specifies a pipeline of\nComposition Functions, each of which is responsible for producing\ncomposed resources that Crossplane should create or update.\n\n\n\"Resources\" indicates that a Composition uses what is commonly referred\nto as \"Patch & Transform\" or P&T composition. This mode of Composition\nuses an array of resources, each a template for a composed resource.\n\n\nAll Compositions should use Pipeline mode. 
Resources mode is deprecated.\nResources mode won't be removed in Crossplane 1.x, and will remain the\ndefault to avoid breaking legacy Compositions. However, it's no longer\naccepting new features, and only accepting security related bug fixes." + +### fn spec.withPatchSets + +```ts +withPatchSets(patchSets) +``` + +"PatchSets define a named set of patches that may be included by any\nresource in this Composition. PatchSets cannot themselves refer to other\nPatchSets.\n\n\nPatchSets are only used by the \"Resources\" mode of Composition. They\nare ignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.withPatchSetsMixin + +```ts +withPatchSetsMixin(patchSets) +``` + +"PatchSets define a named set of patches that may be included by any\nresource in this Composition. PatchSets cannot themselves refer to other\nPatchSets.\n\n\nPatchSets are only used by the \"Resources\" mode of Composition. They\nare ignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +**Note:** This function appends passed data to existing values + +### fn spec.withPipeline + +```ts +withPipeline(pipeline) +``` + +"Pipeline is a list of composition function steps that will be used when a\ncomposite resource referring to this composition is created. One of\nresources and pipeline must be specified - you cannot specify both.\n\n\nThe Pipeline is only used by the \"Pipeline\" mode of Composition. It is\nignored by other modes." + +### fn spec.withPipelineMixin + +```ts +withPipelineMixin(pipeline) +``` + +"Pipeline is a list of composition function steps that will be used when a\ncomposite resource referring to this composition is created. One of\nresources and pipeline must be specified - you cannot specify both.\n\n\nThe Pipeline is only used by the \"Pipeline\" mode of Composition. It is\nignored by other modes." + +**Note:** This function appends passed data to existing values + +### fn spec.withResources + +```ts +withResources(resources) +``` + +"Resources is a list of resource templates that will be used when a\ncomposite resource referring to this composition is created.\n\n\nResources are only used by the \"Resources\" mode of Composition. They are\nignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.withResourcesMixin + +```ts +withResourcesMixin(resources) +``` + +"Resources is a list of resource templates that will be used when a\ncomposite resource referring to this composition is created.\n\n\nResources are only used by the \"Resources\" mode of Composition. They are\nignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevision + +```ts +withRevision(revision) +``` + +"Revision number. Newer revisions have larger numbers." + +### fn spec.withWriteConnectionSecretsToNamespace + +```ts +withWriteConnectionSecretsToNamespace(writeConnectionSecretsToNamespace) +``` + +"WriteConnectionSecretsToNamespace specifies the namespace in which the\nconnection secrets of composite resource dynamically provisioned using\nthis composition will be created.\nThis field is planned to be replaced in a future release in favor of\nPublishConnectionDetailsWithStoreConfigRef. Currently, both could be\nset independently and connection details would be published to both\nwithout affecting each other as long as related fields at MR level\nspecified." 
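To make Pipeline mode concrete, here is a sketch of how the mode and pipeline setters above might be combined. It assumes the usual pattern for generated Jsonnet libraries, where the element-level functions under `spec.pipeline` (documented further down this page) build the entries passed to `withPipeline`; the import path, step name, and Function name are illustrative:

```jsonnet
// Hypothetical import path; adjust to wherever this library is vendored.
local crossplane = import 'crossplane/1.17/main.libsonnet';
local rev = crossplane.apiextensions.v1beta1.compositionRevision;

rev.spec.withMode('Pipeline')
+ rev.spec.withPipeline([
  // One pipeline step that calls a composition function.
  rev.spec.pipeline.withStep('patch-and-transform')
  + rev.spec.pipeline.functionRef.withName('function-patch-and-transform'),
])
```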
+ +## obj spec.compositeTypeRef + +"CompositeTypeRef specifies the type of composite resource that this\ncomposition is compatible with." + +### fn spec.compositeTypeRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"APIVersion of the type." + +### fn spec.compositeTypeRef.withKind + +```ts +withKind(kind) +``` + +"Kind of the type." + +## obj spec.environment + +"Environment configures the environment in which resources are rendered.\n\n\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\nunless the relevant Crossplane feature flag is enabled, and may be\nchanged or removed without notice." + +### fn spec.environment.withDefaultData + +```ts +withDefaultData(defaultData) +``` + +"DefaultData statically defines the initial state of the environment.\nIt has the same schema-less structure as the data field in\nenvironment configs.\nIt is overwritten by the selected environment configs." + +### fn spec.environment.withDefaultDataMixin + +```ts +withDefaultDataMixin(defaultData) +``` + +"DefaultData statically defines the initial state of the environment.\nIt has the same schema-less structure as the data field in\nenvironment configs.\nIt is overwritten by the selected environment configs." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.withEnvironmentConfigs + +```ts +withEnvironmentConfigs(environmentConfigs) +``` + +"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\nresources are stored in the composite resource at\n`spec.environmentConfigRefs` and is only updated if it is null.\n\n\nThe list of references is used to compute an in-memory environment at\ncompose time. The data of all object is merged in the order they are\nlisted, meaning the values of EnvironmentConfigs with a larger index take\npriority over ones with smaller indices.\n\n\nThe computed environment can be accessed in a composition using\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches." + +### fn spec.environment.withEnvironmentConfigsMixin + +```ts +withEnvironmentConfigsMixin(environmentConfigs) +``` + +"EnvironmentConfigs selects a list of `EnvironmentConfig`s. The resolved\nresources are stored in the composite resource at\n`spec.environmentConfigRefs` and is only updated if it is null.\n\n\nThe list of references is used to compute an in-memory environment at\ncompose time. The data of all object is merged in the order they are\nlisted, meaning the values of EnvironmentConfigs with a larger index take\npriority over ones with smaller indices.\n\n\nThe computed environment can be accessed in a composition using\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.withPatches + +```ts +withPatches(patches) +``` + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed." + +### fn spec.environment.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed." + +**Note:** This function appends passed data to existing values + +## obj spec.environment.environmentConfigs + +"EnvironmentConfigs selects a list of `EnvironmentConfig`s. 
The resolved\nresources are stored in the composite resource at\n`spec.environmentConfigRefs` and is only updated if it is null.\n\n\nThe list of references is used to compute an in-memory environment at\ncompose time. The data of all object is merged in the order they are\nlisted, meaning the values of EnvironmentConfigs with a larger index take\npriority over ones with smaller indices.\n\n\nThe computed environment can be accessed in a composition using\n`FromEnvironmentFieldPath` and `CombineFromEnvironment` patches." + +### fn spec.environment.environmentConfigs.withType + +```ts +withType(type) +``` + +"Type specifies the way the EnvironmentConfig is selected.\nDefault is `Reference`" + +## obj spec.environment.environmentConfigs.ref + +"Ref is a named reference to a single EnvironmentConfig.\nEither Ref or Selector is required." + +### fn spec.environment.environmentConfigs.ref.withName + +```ts +withName(name) +``` + +"The name of the object." + +## obj spec.environment.environmentConfigs.selector + +"Selector selects EnvironmentConfig(s) via labels." + +### fn spec.environment.environmentConfigs.selector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +### fn spec.environment.environmentConfigs.selector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"MatchLabels ensures an object with matching labels is selected." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.environmentConfigs.selector.withMaxMatch + +```ts +withMaxMatch(maxMatch) +``` + +"MaxMatch specifies the number of extracted EnvironmentConfigs in Multiple mode, extracts all if nil." + +### fn spec.environment.environmentConfigs.selector.withMinMatch + +```ts +withMinMatch(minMatch) +``` + +"MinMatch specifies the required minimum of extracted EnvironmentConfigs in Multiple mode." + +### fn spec.environment.environmentConfigs.selector.withMode + +```ts +withMode(mode) +``` + +"Mode specifies retrieval strategy: \"Single\" or \"Multiple\"." + +### fn spec.environment.environmentConfigs.selector.withSortByFieldPath + +```ts +withSortByFieldPath(sortByFieldPath) +``` + +"SortByFieldPath is the path to the field based on which list of EnvironmentConfigs is alphabetically sorted." + +## obj spec.environment.environmentConfigs.selector.matchLabels + +"MatchLabels ensures an object with matching labels is selected." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withFromFieldPathPolicy + +```ts +withFromFieldPathPolicy(fromFieldPathPolicy) +``` + +"FromFieldPathPolicy specifies the policy for the valueFromFieldPath.\nThe default is Required, meaning that an error will be returned if the\nfield is not found in the composite resource.\nOptional means that if the field is not found in the composite resource,\nthat label pair will just be skipped. N.B. other specified label\nmatchers will still be used to retrieve the desired\nenvironment config, if any." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withKey + +```ts +withKey(key) +``` + +"Key of the label to match." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withType + +```ts +withType(type) +``` + +"Type specifies where the value for a label comes from." + +### fn spec.environment.environmentConfigs.selector.matchLabels.withValue + +```ts +withValue(value) +``` + +"Value specifies a literal label value." 
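Putting the selector pieces together, a label-based EnvironmentConfig selection could look like the sketch below. The `Selector` type and the `Value` label-source type are assumptions not spelled out in the descriptions above (only the `Reference` default and the `Single`/`Multiple` modes are named); the import path, label key, value, and sort path are illustrative:

```jsonnet
// Hypothetical import path; adjust to wherever this library is vendored.
local crossplane = import 'crossplane/1.17/main.libsonnet';
local envCfgs = crossplane.apiextensions.v1beta1.compositionRevision.spec.environment.environmentConfigs;

envCfgs.withType('Selector')  // assumed counterpart to the default `Reference`
+ envCfgs.selector.withMode('Multiple')
+ envCfgs.selector.withMaxMatch(3)
+ envCfgs.selector.withSortByFieldPath('metadata.name')
+ envCfgs.selector.withMatchLabels([
  // One label matcher with a literal value.
  envCfgs.selector.matchLabels.withKey('stage')
  + envCfgs.selector.matchLabels.withType('Value')  // assumed literal-value type
  + envCfgs.selector.matchLabels.withValue('dev'),
])
```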
+ +### fn spec.environment.environmentConfigs.selector.matchLabels.withValueFromFieldPath + +```ts +withValueFromFieldPath(valueFromFieldPath) +``` + +"ValueFromFieldPath specifies the field path to look for the label value." + +## obj spec.environment.patches + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed." + +### fn spec.environment.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath or\nToCompositeFieldPath." + +### fn spec.environment.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn spec.environment.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.environment.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the Patch object." + +## obj spec.environment.patches.combine + +"Combine is the patch configuration for a CombineFromComposite or\nCombineToComposite patch." + +### fn spec.environment.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn spec.environment.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.environment.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj spec.environment.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn spec.environment.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj spec.environment.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.environment.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj spec.environment.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn spec.environment.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. 
Use 'Required' if the patch should fail if\nthe specified path does not exist." + +## obj spec.environment.patches.policy.mergeOptions + +"MergeOptions Specifies merge options on a field path." + +### fn spec.environment.patches.policy.mergeOptions.withAppendSlice + +```ts +withAppendSlice(appendSlice) +``` + +"Specifies that already existing elements in a merged slice should be preserved" + +### fn spec.environment.patches.policy.mergeOptions.withKeepMapValues + +```ts +withKeepMapValues(keepMapValues) +``` + +"Specifies that already existing values in a merged map should be preserved" + +## obj spec.environment.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.environment.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn spec.environment.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn spec.environment.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj spec.environment.patches.transforms.convert + +"Convert is used to cast the input into the given output type." + +### fn spec.environment.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn spec.environment.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj spec.environment.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn spec.environment.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn spec.environment.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn spec.environment.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.environment.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj spec.environment.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." 
+ +### fn spec.environment.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn spec.environment.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn spec.environment.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn spec.environment.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj spec.environment.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn spec.environment.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn spec.environment.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." + +### fn spec.environment.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn spec.environment.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj spec.environment.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn spec.environment.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON.\n`ToAdler32` generate a addler32 hash based on the input string." + +### fn spec.environment.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn spec.environment.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn spec.environment.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj spec.environment.patches.transforms.string.join + +"Join defines parameters to join a slice of values to a string." + +### fn spec.environment.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator defines the character that should separate the values from each\nother in the joined string." + +## obj spec.environment.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." 
+ +### fn spec.environment.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn spec.environment.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj spec.environment.policy + +"Policy represents the Resolve and Resolution policies which apply to\nall EnvironmentSourceReferences in EnvironmentConfigs list." + +### fn spec.environment.policy.withResolution + +```ts +withResolution(resolution) +``` + +"Resolution specifies whether resolution of this reference is required.\nThe default is 'Required', which means the reconcile will fail if the\nreference cannot be resolved. 'Optional' means this reference will be\na no-op if it cannot be resolved." + +### fn spec.environment.policy.withResolve + +```ts +withResolve(resolve) +``` + +"Resolve specifies when this reference should be resolved. The default\nis 'IfNotPresent', which will attempt to resolve the reference only when\nthe corresponding field is not present. Use 'Always' to resolve the\nreference on every reconcile." + +## obj spec.patchSets + +"PatchSets define a named set of patches that may be included by any\nresource in this Composition. PatchSets cannot themselves refer to other\nPatchSets.\n\n\nPatchSets are only used by the \"Resources\" mode of Composition. They\nare ignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.patchSets.withName + +```ts +withName(name) +``` + +"Name of this PatchSet." + +### fn spec.patchSets.withPatches + +```ts +withPatches(patches) +``` + +"Patches will be applied as an overlay to the base resource." + +### fn spec.patchSets.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches will be applied as an overlay to the base resource." + +**Note:** This function appends passed data to existing values + +## obj spec.patchSets.patches + +"Patches will be applied as an overlay to the base resource." + +### fn spec.patchSets.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath,\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath." + +### fn spec.patchSets.patches.withPatchSetName + +```ts +withPatchSetName(patchSetName) +``` + +"PatchSetName to include patches from. Required when type is PatchSet." + +### fn spec.patchSets.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn spec.patchSets.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.patchSets.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn spec.patchSets.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. 
Each patch type may require\nits own fields to be set on the Patch object." + +## obj spec.patchSets.patches.combine + +"Combine is the patch configuration for a CombineFromComposite,\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch." + +### fn spec.patchSets.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn spec.patchSets.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.patchSets.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj spec.patchSets.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn spec.patchSets.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj spec.patchSets.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.patchSets.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj spec.patchSets.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn spec.patchSets.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. Use 'Required' if the patch should fail if\nthe specified path does not exist." + +## obj spec.patchSets.patches.policy.mergeOptions + +"MergeOptions Specifies merge options on a field path." + +### fn spec.patchSets.patches.policy.mergeOptions.withAppendSlice + +```ts +withAppendSlice(appendSlice) +``` + +"Specifies that already existing elements in a merged slice should be preserved" + +### fn spec.patchSets.patches.policy.mergeOptions.withKeepMapValues + +```ts +withKeepMapValues(keepMapValues) +``` + +"Specifies that already existing values in a merged map should be preserved" + +## obj spec.patchSets.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.patchSets.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn spec.patchSets.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn spec.patchSets.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj spec.patchSets.patches.transforms.convert + +"Convert is used to cast the input into the given output type." 
+ +### fn spec.patchSets.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn spec.patchSets.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj spec.patchSets.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn spec.patchSets.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn spec.patchSets.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn spec.patchSets.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.patchSets.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj spec.patchSets.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.patchSets.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn spec.patchSets.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn spec.patchSets.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn spec.patchSets.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj spec.patchSets.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn spec.patchSets.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn spec.patchSets.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." 
+ +### fn spec.patchSets.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn spec.patchSets.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj spec.patchSets.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn spec.patchSets.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON.\n`ToAdler32` generate a addler32 hash based on the input string." + +### fn spec.patchSets.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn spec.patchSets.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn spec.patchSets.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj spec.patchSets.patches.transforms.string.join + +"Join defines parameters to join a slice of values to a string." + +### fn spec.patchSets.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator defines the character that should separate the values from each\nother in the joined string." + +## obj spec.patchSets.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn spec.patchSets.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn spec.patchSets.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj spec.pipeline + +"Pipeline is a list of composition function steps that will be used when a\ncomposite resource referring to this composition is created. One of\nresources and pipeline must be specified - you cannot specify both.\n\n\nThe Pipeline is only used by the \"Pipeline\" mode of Composition. It is\nignored by other modes." + +### fn spec.pipeline.withCredentials + +```ts +withCredentials(credentials) +``` + +"Credentials are optional credentials that the Composition Function needs." + +### fn spec.pipeline.withCredentialsMixin + +```ts +withCredentialsMixin(credentials) +``` + +"Credentials are optional credentials that the Composition Function needs." + +**Note:** This function appends passed data to existing values + +### fn spec.pipeline.withInput + +```ts +withInput(input) +``` + +"Input is an optional, arbitrary Kubernetes resource (i.e. a resource\nwith an apiVersion and kind) that will be passed to the Composition\nFunction as the 'input' of its RunFunctionRequest." + +### fn spec.pipeline.withInputMixin + +```ts +withInputMixin(input) +``` + +"Input is an optional, arbitrary Kubernetes resource (i.e. 
a resource\nwith an apiVersion and kind) that will be passed to the Composition\nFunction as the 'input' of its RunFunctionRequest." + +**Note:** This function appends passed data to existing values + +### fn spec.pipeline.withStep + +```ts +withStep(step) +``` + +"Step name. Must be unique within its Pipeline." + +## obj spec.pipeline.credentials + +"Credentials are optional credentials that the Composition Function needs." + +### fn spec.pipeline.credentials.withName + +```ts +withName(name) +``` + +"Name of this set of credentials." + +### fn spec.pipeline.credentials.withSource + +```ts +withSource(source) +``` + +"Source of the function credentials." + +## obj spec.pipeline.credentials.secretRef + +"A SecretRef is a reference to a secret containing credentials that should\nbe supplied to the function." + +### fn spec.pipeline.credentials.secretRef.withName + +```ts +withName(name) +``` + +"Name of the secret." + +### fn spec.pipeline.credentials.secretRef.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace of the secret." + +## obj spec.pipeline.functionRef + +"FunctionRef is a reference to the Composition Function this step should\nexecute." + +### fn spec.pipeline.functionRef.withName + +```ts +withName(name) +``` + +"Name of the referenced Function." + +## obj spec.publishConnectionDetailsWithStoreConfigRef + +"PublishConnectionDetailsWithStoreConfig specifies the secret store config\nwith which the connection details of composite resources dynamically\nprovisioned using this composition will be published.\n\n\nTHIS IS AN ALPHA FIELD. Do not use it in production. It is not honored\nunless the relevant Crossplane feature flag is enabled, and may be\nchanged or removed without notice." + +### fn spec.publishConnectionDetailsWithStoreConfigRef.withName + +```ts +withName(name) +``` + +"Name of the referenced StoreConfig." + +## obj spec.resources + +"Resources is a list of resource templates that will be used when a\ncomposite resource referring to this composition is created.\n\n\nResources are only used by the \"Resources\" mode of Composition. They are\nignored by other modes.\n\n\nDeprecated: Use Composition Functions instead." + +### fn spec.resources.withBase + +```ts +withBase(base) +``` + +"Base is the target resource that the patches will be applied on." + +### fn spec.resources.withBaseMixin + +```ts +withBaseMixin(base) +``` + +"Base is the target resource that the patches will be applied on." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withConnectionDetails + +```ts +withConnectionDetails(connectionDetails) +``` + +"ConnectionDetails lists the propagation secret keys from this target\nresource to the composition instance connection secret." + +### fn spec.resources.withConnectionDetailsMixin + +```ts +withConnectionDetailsMixin(connectionDetails) +``` + +"ConnectionDetails lists the propagation secret keys from this target\nresource to the composition instance connection secret." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withName + +```ts +withName(name) +``` + +"A Name uniquely identifies this entry within its Composition's resources\narray. Names are optional but *strongly* recommended. When all entries in\nthe resources array are named entries may added, deleted, and reordered\nas long as their names do not change. When entries are not named the\nlength and order of the resources array should be treated as immutable.\nEither all or no entries must be named." 
+ +### fn spec.resources.withPatches + +```ts +withPatches(patches) +``` + +"Patches will be applied as overlay to the base resource." + +### fn spec.resources.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches will be applied as overlay to the base resource." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withReadinessChecks + +```ts +withReadinessChecks(readinessChecks) +``` + +"ReadinessChecks allows users to define custom readiness checks. All checks\nhave to return true in order for resource to be considered ready. The\ndefault readiness check is to have the \"Ready\" condition to be \"True\"." + +### fn spec.resources.withReadinessChecksMixin + +```ts +withReadinessChecksMixin(readinessChecks) +``` + +"ReadinessChecks allows users to define custom readiness checks. All checks\nhave to return true in order for resource to be considered ready. The\ndefault readiness check is to have the \"Ready\" condition to be \"True\"." + +**Note:** This function appends passed data to existing values + +## obj spec.resources.connectionDetails + +"ConnectionDetails lists the propagation secret keys from this target\nresource to the composition instance connection secret." + +### fn spec.resources.connectionDetails.withFromConnectionSecretKey + +```ts +withFromConnectionSecretKey(fromConnectionSecretKey) +``` + +"FromConnectionSecretKey is the key that will be used to fetch the value\nfrom the composed resource's connection secret." + +### fn spec.resources.connectionDetails.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the composed resource whose\nvalue to be used as input. Name must be specified if the type is\nFromFieldPath." + +### fn spec.resources.connectionDetails.withName + +```ts +withName(name) +``` + +"Name of the connection secret key that will be propagated to the\nconnection secret of the composition instance. Leave empty if you'd like\nto use the same key name." + +### fn spec.resources.connectionDetails.withType + +```ts +withType(type) +``` + +"Type sets the connection detail fetching behaviour to be used. Each\nconnection detail type may require its own fields to be set on the\nConnectionDetail object. If the type is omitted Crossplane will attempt\nto infer it based on which other fields were specified. If multiple\nfields are specified the order of precedence is:\n1. FromValue\n2. FromConnectionSecretKey\n3. FromFieldPath" + +### fn spec.resources.connectionDetails.withValue + +```ts +withValue(value) +``` + +"Value that will be propagated to the connection secret of the composite\nresource. May be set to inject a fixed, non-sensitive connection secret\nvalue, for example a well-known port." + +## obj spec.resources.patches + +"Patches will be applied as overlay to the base resource." + +### fn spec.resources.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath,\nFromEnvironmentFieldPath, ToCompositeFieldPath, ToEnvironmentFieldPath." + +### fn spec.resources.patches.withPatchSetName + +```ts +withPatchSetName(patchSetName) +``` + +"PatchSetName to include patches from. Required when type is PatchSet." 
+ +### fn spec.resources.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn spec.resources.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.resources.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the Patch object." + +## obj spec.resources.patches.combine + +"Combine is the patch configuration for a CombineFromComposite,\nCombineFromEnvironment, CombineToComposite or CombineToEnvironment patch." + +### fn spec.resources.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn spec.resources.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.resources.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj spec.resources.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn spec.resources.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj spec.resources.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn spec.resources.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj spec.resources.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn spec.resources.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. Use 'Required' if the patch should fail if\nthe specified path does not exist." + +## obj spec.resources.patches.policy.mergeOptions + +"MergeOptions Specifies merge options on a field path." 
+ +### fn spec.resources.patches.policy.mergeOptions.withAppendSlice + +```ts +withAppendSlice(appendSlice) +``` + +"Specifies that already existing elements in a merged slice should be preserved" + +### fn spec.resources.patches.policy.mergeOptions.withKeepMapValues + +```ts +withKeepMapValues(keepMapValues) +``` + +"Specifies that already existing values in a merged map should be preserved" + +## obj spec.resources.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn spec.resources.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn spec.resources.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj spec.resources.patches.transforms.convert + +"Convert is used to cast the input into the given output type." + +### fn spec.resources.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn spec.resources.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj spec.resources.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn spec.resources.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn spec.resources.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn spec.resources.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.resources.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj spec.resources.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn spec.resources.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." 
+ +### fn spec.resources.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn spec.resources.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn spec.resources.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj spec.resources.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn spec.resources.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn spec.resources.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." + +### fn spec.resources.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn spec.resources.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj spec.resources.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn spec.resources.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON.\n`ToAdler32` generate a addler32 hash based on the input string." + +### fn spec.resources.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn spec.resources.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn spec.resources.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj spec.resources.patches.transforms.string.join + +"Join defines parameters to join a slice of values to a string." + +### fn spec.resources.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator defines the character that should separate the values from each\nother in the joined string." + +## obj spec.resources.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn spec.resources.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn spec.resources.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. 
May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj spec.resources.readinessChecks + +"ReadinessChecks allows users to define custom readiness checks. All checks\nhave to return true in order for resource to be considered ready. The\ndefault readiness check is to have the \"Ready\" condition to be \"True\"." + +### fn spec.resources.readinessChecks.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"FieldPath shows the path of the field whose value will be used." + +### fn spec.resources.readinessChecks.withMatchInteger + +```ts +withMatchInteger(matchInteger) +``` + +"MatchInt is the value you'd like to match if you're using \"MatchInt\" type." + +### fn spec.resources.readinessChecks.withMatchString + +```ts +withMatchString(matchString) +``` + +"MatchString is the value you'd like to match if you're using \"MatchString\" type." + +### fn spec.resources.readinessChecks.withType + +```ts +withType(type) +``` + +"Type indicates the type of probe you'd like to use." + +## obj spec.resources.readinessChecks.matchCondition + +"MatchCondition specifies the condition you'd like to match if you're using \"MatchCondition\" type." + +### fn spec.resources.readinessChecks.matchCondition.withType + +```ts +withType(type) +``` + +"Type indicates the type of condition you'd like to use." \ No newline at end of file diff --git a/docs/crossplane/1.17/apiextensions/v1beta1/index.md b/docs/crossplane/1.17/apiextensions/v1beta1/index.md new file mode 100644 index 0000000..25beec2 --- /dev/null +++ b/docs/crossplane/1.17/apiextensions/v1beta1/index.md @@ -0,0 +1,9 @@ +--- +permalink: /crossplane/1.17/apiextensions/v1beta1/ +--- + +# apiextensions.v1beta1 + + + +* [compositionRevision](compositionRevision.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/meta/index.md b/docs/crossplane/1.17/meta/index.md new file mode 100644 index 0000000..6f69973 --- /dev/null +++ b/docs/crossplane/1.17/meta/index.md @@ -0,0 +1,11 @@ +--- +permalink: /crossplane/1.17/meta/ +--- + +# meta + + + +* [v1](v1/index.md) +* [v1alpha1](v1alpha1/index.md) +* [v1beta1](v1beta1/index.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/meta/v1/configuration.md b/docs/crossplane/1.17/meta/v1/configuration.md new file mode 100644 index 0000000..75ebd6e --- /dev/null +++ b/docs/crossplane/1.17/meta/v1/configuration.md @@ -0,0 +1,285 @@ +--- +permalink: /crossplane/1.17/meta/v1/configuration/ +--- + +# meta.v1.configuration + +"A Configuration is the description of a Crossplane Configuration package." 
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withDependsOn(dependsOn)`](#fn-specwithdependson) + * [`fn withDependsOnMixin(dependsOn)`](#fn-specwithdependsonmixin) + * [`obj spec.crossplane`](#obj-speccrossplane) + * [`fn withVersion(version)`](#fn-speccrossplanewithversion) + * [`obj spec.dependsOn`](#obj-specdependson) + * [`fn withConfiguration(configuration)`](#fn-specdependsonwithconfiguration) + * [`fn withFunction(Function)`](#fn-specdependsonwithfunction) + * [`fn withProvider(provider)`](#fn-specdependsonwithprovider) + * [`fn withVersion(version)`](#fn-specdependsonwithversion) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Configuration + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." 
+ +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"ConfigurationSpec specifies the configuration of a Configuration." + +### fn spec.withDependsOn + +```ts +withDependsOn(dependsOn) +``` + +"Dependencies on other packages." + +### fn spec.withDependsOnMixin + +```ts +withDependsOnMixin(dependsOn) +``` + +"Dependencies on other packages." + +**Note:** This function appends passed data to existing values + +## obj spec.crossplane + +"Semantic version constraints of Crossplane that package is compatible with." + +### fn spec.crossplane.withVersion + +```ts +withVersion(version) +``` + +"Semantic version constraints of Crossplane that package is compatible with." + +## obj spec.dependsOn + +"Dependencies on other packages." + +### fn spec.dependsOn.withConfiguration + +```ts +withConfiguration(configuration) +``` + +"Configuration is the name of a Configuration package image." + +### fn spec.dependsOn.withFunction + +```ts +withFunction(Function) +``` + +"Function is the name of a Function package image." + +### fn spec.dependsOn.withProvider + +```ts +withProvider(provider) +``` + +"Provider is the name of a Provider package image." + +### fn spec.dependsOn.withVersion + +```ts +withVersion(version) +``` + +"Version is the semantic version constraints of the dependency image." \ No newline at end of file diff --git a/docs/crossplane/1.17/meta/v1/index.md b/docs/crossplane/1.17/meta/v1/index.md new file mode 100644 index 0000000..aa20a09 --- /dev/null +++ b/docs/crossplane/1.17/meta/v1/index.md @@ -0,0 +1,10 @@ +--- +permalink: /crossplane/1.17/meta/v1/ +--- + +# meta.v1 + + + +* [configuration](configuration.md) +* [provider](provider.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/meta/v1/provider.md b/docs/crossplane/1.17/meta/v1/provider.md new file mode 100644 index 0000000..d9652e5 --- /dev/null +++ b/docs/crossplane/1.17/meta/v1/provider.md @@ -0,0 +1,424 @@ +--- +permalink: /crossplane/1.17/meta/v1/provider/ +--- + +# meta.v1.provider + +"A Provider is the description of a Crossplane Provider package." 
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withDependsOn(dependsOn)`](#fn-specwithdependson) + * [`fn withDependsOnMixin(dependsOn)`](#fn-specwithdependsonmixin) + * [`obj spec.controller`](#obj-speccontroller) + * [`fn withImage(image)`](#fn-speccontrollerwithimage) + * [`fn withPermissionRequests(permissionRequests)`](#fn-speccontrollerwithpermissionrequests) + * [`fn withPermissionRequestsMixin(permissionRequests)`](#fn-speccontrollerwithpermissionrequestsmixin) + * [`obj spec.controller.permissionRequests`](#obj-speccontrollerpermissionrequests) + * [`fn withApiGroups(apiGroups)`](#fn-speccontrollerpermissionrequestswithapigroups) + * [`fn withApiGroupsMixin(apiGroups)`](#fn-speccontrollerpermissionrequestswithapigroupsmixin) + * [`fn withNonResourceURLs(nonResourceURLs)`](#fn-speccontrollerpermissionrequestswithnonresourceurls) + * [`fn withNonResourceURLsMixin(nonResourceURLs)`](#fn-speccontrollerpermissionrequestswithnonresourceurlsmixin) + * [`fn withResourceNames(resourceNames)`](#fn-speccontrollerpermissionrequestswithresourcenames) + * [`fn withResourceNamesMixin(resourceNames)`](#fn-speccontrollerpermissionrequestswithresourcenamesmixin) + * [`fn withResources(resources)`](#fn-speccontrollerpermissionrequestswithresources) + * [`fn withResourcesMixin(resources)`](#fn-speccontrollerpermissionrequestswithresourcesmixin) + * [`fn withVerbs(verbs)`](#fn-speccontrollerpermissionrequestswithverbs) + * [`fn withVerbsMixin(verbs)`](#fn-speccontrollerpermissionrequestswithverbsmixin) + * [`obj spec.crossplane`](#obj-speccrossplane) + * [`fn withVersion(version)`](#fn-speccrossplanewithversion) + * [`obj spec.dependsOn`](#obj-specdependson) + * [`fn withConfiguration(configuration)`](#fn-specdependsonwithconfiguration) + * [`fn withFunction(Function)`](#fn-specdependsonwithfunction) + * [`fn withProvider(provider)`](#fn-specdependsonwithprovider) + * [`fn withVersion(version)`](#fn-specdependsonwithversion) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Provider + +## obj metadata + +"ObjectMeta is metadata that all persisted resources 
must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. 
finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"ProviderSpec specifies the configuration of a Provider." + +### fn spec.withDependsOn + +```ts +withDependsOn(dependsOn) +``` + +"Dependencies on other packages." + +### fn spec.withDependsOnMixin + +```ts +withDependsOnMixin(dependsOn) +``` + +"Dependencies on other packages." + +**Note:** This function appends passed data to existing values + +## obj spec.controller + +"Configuration for the packaged Provider's controller." + +### fn spec.controller.withImage + +```ts +withImage(image) +``` + +"Image is the packaged Provider controller image." + +### fn spec.controller.withPermissionRequests + +```ts +withPermissionRequests(permissionRequests) +``` + +"PermissionRequests for RBAC rules required for this provider's controller\nto function. The RBAC manager is responsible for assessing the requested\npermissions." + +### fn spec.controller.withPermissionRequestsMixin + +```ts +withPermissionRequestsMixin(permissionRequests) +``` + +"PermissionRequests for RBAC rules required for this provider's controller\nto function. The RBAC manager is responsible for assessing the requested\npermissions." + +**Note:** This function appends passed data to existing values + +## obj spec.controller.permissionRequests + +"PermissionRequests for RBAC rules required for this provider's controller\nto function. The RBAC manager is responsible for assessing the requested\npermissions." 
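+
+As an illustrative aside (not part of the generated text), a permission request entry can be assembled from the helpers documented below and attached with `spec.controller.withPermissionRequests`; the import path, package name, and the specific RBAC rule are assumptions.
+
+```jsonnet
+// Hypothetical import path; `pr` is a shorthand for the permissionRequests helpers.
+local provider = (import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet').meta.v1.provider;
+local pr = provider.spec.controller.permissionRequests;
+
+// One requested RBAC rule: read access to ConfigMaps in the core ('') API group.
+local configMapRead =
+  pr.withApiGroups(['']) +
+  pr.withResources(['configmaps']) +
+  pr.withVerbs(['get', 'list', 'watch']);
+
+provider.new('provider-example') +
+provider.spec.controller.withPermissionRequests([configMapRead])
+```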
+ +### fn spec.controller.permissionRequests.withApiGroups + +```ts +withApiGroups(apiGroups) +``` + +"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of\nthe enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups." + +### fn spec.controller.permissionRequests.withApiGroupsMixin + +```ts +withApiGroupsMixin(apiGroups) +``` + +"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of\nthe enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups." + +**Note:** This function appends passed data to existing values + +### fn spec.controller.permissionRequests.withNonResourceURLs + +```ts +withNonResourceURLs(nonResourceURLs) +``` + +"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path\nSince non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.\nRules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both." + +### fn spec.controller.permissionRequests.withNonResourceURLsMixin + +```ts +withNonResourceURLsMixin(nonResourceURLs) +``` + +"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path\nSince non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.\nRules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both." + +**Note:** This function appends passed data to existing values + +### fn spec.controller.permissionRequests.withResourceNames + +```ts +withResourceNames(resourceNames) +``` + +"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed." + +### fn spec.controller.permissionRequests.withResourceNamesMixin + +```ts +withResourceNamesMixin(resourceNames) +``` + +"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed." + +**Note:** This function appends passed data to existing values + +### fn spec.controller.permissionRequests.withResources + +```ts +withResources(resources) +``` + +"Resources is a list of resources this rule applies to. '*' represents all resources." + +### fn spec.controller.permissionRequests.withResourcesMixin + +```ts +withResourcesMixin(resources) +``` + +"Resources is a list of resources this rule applies to. '*' represents all resources." + +**Note:** This function appends passed data to existing values + +### fn spec.controller.permissionRequests.withVerbs + +```ts +withVerbs(verbs) +``` + +"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs." + +### fn spec.controller.permissionRequests.withVerbsMixin + +```ts +withVerbsMixin(verbs) +``` + +"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs." + +**Note:** This function appends passed data to existing values + +## obj spec.crossplane + +"Semantic version constraints of Crossplane that package is compatible with." 
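+
+For example (a sketch under the same assumptions as the earlier snippets; the import path and the constraint string are illustrative), the compatibility constraint is a plain semantic-version range:
+
+```jsonnet
+// Hypothetical import path.
+local provider = (import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet').meta.v1.provider;
+
+// The argument is a semver constraint string evaluated against the Crossplane version.
+provider.new('provider-example') +
+provider.spec.crossplane.withVersion('>=1.16.0')
+```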
+ +### fn spec.crossplane.withVersion + +```ts +withVersion(version) +``` + +"Semantic version constraints of Crossplane that package is compatible with." + +## obj spec.dependsOn + +"Dependencies on other packages." + +### fn spec.dependsOn.withConfiguration + +```ts +withConfiguration(configuration) +``` + +"Configuration is the name of a Configuration package image." + +### fn spec.dependsOn.withFunction + +```ts +withFunction(Function) +``` + +"Function is the name of a Function package image." + +### fn spec.dependsOn.withProvider + +```ts +withProvider(provider) +``` + +"Provider is the name of a Provider package image." + +### fn spec.dependsOn.withVersion + +```ts +withVersion(version) +``` + +"Version is the semantic version constraints of the dependency image." \ No newline at end of file diff --git a/docs/crossplane/1.17/meta/v1alpha1/configuration.md b/docs/crossplane/1.17/meta/v1alpha1/configuration.md new file mode 100644 index 0000000..9c0c045 --- /dev/null +++ b/docs/crossplane/1.17/meta/v1alpha1/configuration.md @@ -0,0 +1,285 @@ +--- +permalink: /crossplane/1.17/meta/v1alpha1/configuration/ +--- + +# meta.v1alpha1.configuration + +"A Configuration is the description of a Crossplane Configuration package." + +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withDependsOn(dependsOn)`](#fn-specwithdependson) + * [`fn withDependsOnMixin(dependsOn)`](#fn-specwithdependsonmixin) + * [`obj spec.crossplane`](#obj-speccrossplane) + * [`fn withVersion(version)`](#fn-speccrossplanewithversion) + * [`obj spec.dependsOn`](#obj-specdependson) + * [`fn withConfiguration(configuration)`](#fn-specdependsonwithconfiguration) + * [`fn withFunction(Function)`](#fn-specdependsonwithfunction) + * [`fn withProvider(provider)`](#fn-specdependsonwithprovider) + * [`fn withVersion(version)`](#fn-specdependsonwithversion) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Configuration + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." 
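+
+A short, illustrative sketch (not part of the generated reference) of setting package metadata on a v1alpha1 Configuration; the import path, label, and annotation values are assumptions.
+
+```jsonnet
+// Hypothetical import path; the v1alpha1 group is navigated the same way as v1.
+local configuration = (import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet').meta.v1alpha1.configuration;
+
+configuration.new('configuration-example') +
+configuration.metadata.withLabels({ team: 'platform' }) +
+configuration.metadata.withAnnotationsMixin({ 'meta.crossplane.io/source': 'https://example.com/platform-ref' })
+```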
+ +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"ConfigurationSpec specifies the configuration of a Configuration." + +### fn spec.withDependsOn + +```ts +withDependsOn(dependsOn) +``` + +"Dependencies on other packages." + +### fn spec.withDependsOnMixin + +```ts +withDependsOnMixin(dependsOn) +``` + +"Dependencies on other packages." + +**Note:** This function appends passed data to existing values + +## obj spec.crossplane + +"Semantic version constraints of Crossplane that package is compatible with." + +### fn spec.crossplane.withVersion + +```ts +withVersion(version) +``` + +"Semantic version constraints of Crossplane that package is compatible with." + +## obj spec.dependsOn + +"Dependencies on other packages." + +### fn spec.dependsOn.withConfiguration + +```ts +withConfiguration(configuration) +``` + +"Configuration is the name of a Configuration package image." + +### fn spec.dependsOn.withFunction + +```ts +withFunction(Function) +``` + +"Function is the name of a Function package image." + +### fn spec.dependsOn.withProvider + +```ts +withProvider(provider) +``` + +"Provider is the name of a Provider package image." + +### fn spec.dependsOn.withVersion + +```ts +withVersion(version) +``` + +"Version is the semantic version constraints of the dependency image." 
\ No newline at end of file diff --git a/docs/crossplane/1.17/meta/v1alpha1/index.md b/docs/crossplane/1.17/meta/v1alpha1/index.md new file mode 100644 index 0000000..6b372c4 --- /dev/null +++ b/docs/crossplane/1.17/meta/v1alpha1/index.md @@ -0,0 +1,10 @@ +--- +permalink: /crossplane/1.17/meta/v1alpha1/ +--- + +# meta.v1alpha1 + + + +* [configuration](configuration.md) +* [provider](provider.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/meta/v1alpha1/provider.md b/docs/crossplane/1.17/meta/v1alpha1/provider.md new file mode 100644 index 0000000..5962943 --- /dev/null +++ b/docs/crossplane/1.17/meta/v1alpha1/provider.md @@ -0,0 +1,424 @@ +--- +permalink: /crossplane/1.17/meta/v1alpha1/provider/ +--- + +# meta.v1alpha1.provider + +"A Provider is the description of a Crossplane Provider package." + +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withDependsOn(dependsOn)`](#fn-specwithdependson) + * [`fn withDependsOnMixin(dependsOn)`](#fn-specwithdependsonmixin) + * [`obj spec.controller`](#obj-speccontroller) + * [`fn withImage(image)`](#fn-speccontrollerwithimage) + * [`fn withPermissionRequests(permissionRequests)`](#fn-speccontrollerwithpermissionrequests) + * [`fn withPermissionRequestsMixin(permissionRequests)`](#fn-speccontrollerwithpermissionrequestsmixin) + * [`obj spec.controller.permissionRequests`](#obj-speccontrollerpermissionrequests) + * [`fn withApiGroups(apiGroups)`](#fn-speccontrollerpermissionrequestswithapigroups) + * [`fn withApiGroupsMixin(apiGroups)`](#fn-speccontrollerpermissionrequestswithapigroupsmixin) + * [`fn withNonResourceURLs(nonResourceURLs)`](#fn-speccontrollerpermissionrequestswithnonresourceurls) + * [`fn withNonResourceURLsMixin(nonResourceURLs)`](#fn-speccontrollerpermissionrequestswithnonresourceurlsmixin) + * [`fn withResourceNames(resourceNames)`](#fn-speccontrollerpermissionrequestswithresourcenames) + * [`fn withResourceNamesMixin(resourceNames)`](#fn-speccontrollerpermissionrequestswithresourcenamesmixin) + * [`fn withResources(resources)`](#fn-speccontrollerpermissionrequestswithresources) + * [`fn 
withResourcesMixin(resources)`](#fn-speccontrollerpermissionrequestswithresourcesmixin) + * [`fn withVerbs(verbs)`](#fn-speccontrollerpermissionrequestswithverbs) + * [`fn withVerbsMixin(verbs)`](#fn-speccontrollerpermissionrequestswithverbsmixin) + * [`obj spec.crossplane`](#obj-speccrossplane) + * [`fn withVersion(version)`](#fn-speccrossplanewithversion) + * [`obj spec.dependsOn`](#obj-specdependson) + * [`fn withConfiguration(configuration)`](#fn-specdependsonwithconfiguration) + * [`fn withFunction(Function)`](#fn-specdependsonwithfunction) + * [`fn withProvider(provider)`](#fn-specdependsonwithprovider) + * [`fn withVersion(version)`](#fn-specdependsonwithversion) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Provider + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"ProviderSpec specifies the configuration of a Provider." + +### fn spec.withDependsOn + +```ts +withDependsOn(dependsOn) +``` + +"Dependencies on other packages." + +### fn spec.withDependsOnMixin + +```ts +withDependsOnMixin(dependsOn) +``` + +"Dependencies on other packages." + +**Note:** This function appends passed data to existing values + +## obj spec.controller + +"Configuration for the packaged Provider's controller." 
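+
+An illustrative sketch (the import path and image reference are assumptions) of pointing the packaged controller at its image with the helper documented below:
+
+```jsonnet
+// Hypothetical import path for the v1alpha1 Provider helpers.
+local provider = (import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet').meta.v1alpha1.provider;
+
+provider.new('provider-example') +
+provider.spec.controller.withImage('example.registry.io/provider-example-controller:v0.1.0')
+```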
+ +### fn spec.controller.withImage + +```ts +withImage(image) +``` + +"Image is the packaged Provider controller image." + +### fn spec.controller.withPermissionRequests + +```ts +withPermissionRequests(permissionRequests) +``` + +"PermissionRequests for RBAC rules required for this provider's controller\nto function. The RBAC manager is responsible for assessing the requested\npermissions." + +### fn spec.controller.withPermissionRequestsMixin + +```ts +withPermissionRequestsMixin(permissionRequests) +``` + +"PermissionRequests for RBAC rules required for this provider's controller\nto function. The RBAC manager is responsible for assessing the requested\npermissions." + +**Note:** This function appends passed data to existing values + +## obj spec.controller.permissionRequests + +"PermissionRequests for RBAC rules required for this provider's controller\nto function. The RBAC manager is responsible for assessing the requested\npermissions." + +### fn spec.controller.permissionRequests.withApiGroups + +```ts +withApiGroups(apiGroups) +``` + +"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of\nthe enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups." + +### fn spec.controller.permissionRequests.withApiGroupsMixin + +```ts +withApiGroupsMixin(apiGroups) +``` + +"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of\nthe enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups." + +**Note:** This function appends passed data to existing values + +### fn spec.controller.permissionRequests.withNonResourceURLs + +```ts +withNonResourceURLs(nonResourceURLs) +``` + +"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path\nSince non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.\nRules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both." + +### fn spec.controller.permissionRequests.withNonResourceURLsMixin + +```ts +withNonResourceURLsMixin(nonResourceURLs) +``` + +"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path\nSince non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.\nRules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both." + +**Note:** This function appends passed data to existing values + +### fn spec.controller.permissionRequests.withResourceNames + +```ts +withResourceNames(resourceNames) +``` + +"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed." + +### fn spec.controller.permissionRequests.withResourceNamesMixin + +```ts +withResourceNamesMixin(resourceNames) +``` + +"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.controller.permissionRequests.withResources + +```ts +withResources(resources) +``` + +"Resources is a list of resources this rule applies to. '*' represents all resources." + +### fn spec.controller.permissionRequests.withResourcesMixin + +```ts +withResourcesMixin(resources) +``` + +"Resources is a list of resources this rule applies to. '*' represents all resources." + +**Note:** This function appends passed data to existing values + +### fn spec.controller.permissionRequests.withVerbs + +```ts +withVerbs(verbs) +``` + +"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs." + +### fn spec.controller.permissionRequests.withVerbsMixin + +```ts +withVerbsMixin(verbs) +``` + +"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs." + +**Note:** This function appends passed data to existing values + +## obj spec.crossplane + +"Semantic version constraints of Crossplane that package is compatible with." + +### fn spec.crossplane.withVersion + +```ts +withVersion(version) +``` + +"Semantic version constraints of Crossplane that package is compatible with." + +## obj spec.dependsOn + +"Dependencies on other packages." + +### fn spec.dependsOn.withConfiguration + +```ts +withConfiguration(configuration) +``` + +"Configuration is the name of a Configuration package image." + +### fn spec.dependsOn.withFunction + +```ts +withFunction(Function) +``` + +"Function is the name of a Function package image." + +### fn spec.dependsOn.withProvider + +```ts +withProvider(provider) +``` + +"Provider is the name of a Provider package image." + +### fn spec.dependsOn.withVersion + +```ts +withVersion(version) +``` + +"Version is the semantic version constraints of the dependency image." \ No newline at end of file diff --git a/docs/crossplane/1.17/meta/v1beta1/function.md b/docs/crossplane/1.17/meta/v1beta1/function.md new file mode 100644 index 0000000..f823ab4 --- /dev/null +++ b/docs/crossplane/1.17/meta/v1beta1/function.md @@ -0,0 +1,294 @@ +--- +permalink: /crossplane/1.17/meta/v1beta1/function/ +--- + +# meta.v1beta1.function + +"A Function is the description of a Crossplane Function package." 
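+
+The snippet below is an illustrative usage sketch, not part of the generated reference; the import path, image reference, and version constraint are assumptions.
+
+```jsonnet
+// Hypothetical import path. `function` is a Jsonnet keyword, so the field is
+// accessed with index syntax rather than a dot.
+local fn = (import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet').meta.v1beta1['function'];
+
+fn.new('function-example') +
+fn.spec.withImage('example.registry.io/function-example:v0.1.0') +
+fn.spec.crossplane.withVersion('>=1.17')
+```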
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withDependsOn(dependsOn)`](#fn-specwithdependson) + * [`fn withDependsOnMixin(dependsOn)`](#fn-specwithdependsonmixin) + * [`fn withImage(image)`](#fn-specwithimage) + * [`obj spec.crossplane`](#obj-speccrossplane) + * [`fn withVersion(version)`](#fn-speccrossplanewithversion) + * [`obj spec.dependsOn`](#obj-specdependson) + * [`fn withConfiguration(configuration)`](#fn-specdependsonwithconfiguration) + * [`fn withFunction(Function)`](#fn-specdependsonwithfunction) + * [`fn withProvider(provider)`](#fn-specdependsonwithprovider) + * [`fn withVersion(version)`](#fn-specdependsonwithversion) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Function + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." 
+ +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"FunctionSpec specifies the configuration of a Function." + +### fn spec.withDependsOn + +```ts +withDependsOn(dependsOn) +``` + +"Dependencies on other packages." + +### fn spec.withDependsOnMixin + +```ts +withDependsOnMixin(dependsOn) +``` + +"Dependencies on other packages." + +**Note:** This function appends passed data to existing values + +### fn spec.withImage + +```ts +withImage(image) +``` + +"Image is the packaged Function image." + +## obj spec.crossplane + +"Semantic version constraints of Crossplane that package is compatible with." + +### fn spec.crossplane.withVersion + +```ts +withVersion(version) +``` + +"Semantic version constraints of Crossplane that package is compatible with." + +## obj spec.dependsOn + +"Dependencies on other packages." + +### fn spec.dependsOn.withConfiguration + +```ts +withConfiguration(configuration) +``` + +"Configuration is the name of a Configuration package image." + +### fn spec.dependsOn.withFunction + +```ts +withFunction(Function) +``` + +"Function is the name of a Function package image." + +### fn spec.dependsOn.withProvider + +```ts +withProvider(provider) +``` + +"Provider is the name of a Provider package image." + +### fn spec.dependsOn.withVersion + +```ts +withVersion(version) +``` + +"Version is the semantic version constraints of the dependency image." 
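+
+For orientation, a minimal usage sketch of these builders follows. The import path, the `crossplane.meta.v1beta1` access path, and all package names and version constraints are assumptions for illustration; `function` is indexed with brackets because it is a reserved word in Jsonnet.
+
+```jsonnet
+// Hypothetical import path; adjust to wherever this generated library lives.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+local fn = crossplane.meta.v1beta1['function'];
+
+// Package metadata for a Function, with a Crossplane version constraint
+// and one provider dependency (names and versions are illustrative).
+fn.new('function-example')
++ fn.spec.crossplane.withVersion('>=v1.17')
++ fn.spec.withDependsOn([
+  fn.spec.dependsOn.withProvider('xpkg.upbound.io/example-org/provider-example')
+  + fn.spec.dependsOn.withVersion('>=v0.1.0'),
+])
+```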
\ No newline at end of file diff --git a/docs/crossplane/1.17/meta/v1beta1/index.md b/docs/crossplane/1.17/meta/v1beta1/index.md new file mode 100644 index 0000000..adf073d --- /dev/null +++ b/docs/crossplane/1.17/meta/v1beta1/index.md @@ -0,0 +1,9 @@ +--- +permalink: /crossplane/1.17/meta/v1beta1/ +--- + +# meta.v1beta1 + + + +* [function](function.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/index.md b/docs/crossplane/1.17/pkg/index.md new file mode 100644 index 0000000..2269b25 --- /dev/null +++ b/docs/crossplane/1.17/pkg/index.md @@ -0,0 +1,11 @@ +--- +permalink: /crossplane/1.17/pkg/ +--- + +# pkg + + + +* [v1](v1/index.md) +* [v1alpha1](v1alpha1/index.md) +* [v1beta1](v1beta1/index.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1/configuration.md b/docs/crossplane/1.17/pkg/v1/configuration.md new file mode 100644 index 0000000..627b95b --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1/configuration.md @@ -0,0 +1,318 @@ +--- +permalink: /crossplane/1.17/pkg/v1/configuration/ +--- + +# pkg.v1.configuration + +"A Configuration installs an OCI compatible Crossplane package, extending\nCrossplane with support for new kinds of CompositeResourceDefinitions and\nCompositions.\n\n\nRead the Crossplane documentation for\n[more information about Configuration packages](https://docs.crossplane.io/latest/concepts/packages)." + +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withCommonLabels(commonLabels)`](#fn-specwithcommonlabels) + * [`fn withCommonLabelsMixin(commonLabels)`](#fn-specwithcommonlabelsmixin) + * [`fn withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints)`](#fn-specwithignorecrossplaneconstraints) + * [`fn withPackage(package)`](#fn-specwithpackage) + * [`fn withPackagePullPolicy(packagePullPolicy)`](#fn-specwithpackagepullpolicy) + * [`fn withPackagePullSecrets(packagePullSecrets)`](#fn-specwithpackagepullsecrets) + * [`fn withPackagePullSecretsMixin(packagePullSecrets)`](#fn-specwithpackagepullsecretsmixin) + * [`fn withRevisionActivationPolicy(revisionActivationPolicy)`](#fn-specwithrevisionactivationpolicy) + * [`fn 
withRevisionHistoryLimit(revisionHistoryLimit)`](#fn-specwithrevisionhistorylimit) + * [`fn withSkipDependencyResolution(skipDependencyResolution)`](#fn-specwithskipdependencyresolution) + * [`obj spec.packagePullSecrets`](#obj-specpackagepullsecrets) + * [`fn withName(name)`](#fn-specpackagepullsecretswithname) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Configuration + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." 
+ +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"ConfigurationSpec specifies details about a request to install a\nconfiguration to Crossplane." + +### fn spec.withCommonLabels + +```ts +withCommonLabels(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.withCommonLabelsMixin + +```ts +withCommonLabelsMixin(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. 
May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.withIgnoreCrossplaneConstraints + +```ts +withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints) +``` + +"IgnoreCrossplaneConstraints indicates to the package manager whether to\nhonor Crossplane version constrains specified by the package.\nDefault is false." + +### fn spec.withPackage + +```ts +withPackage(package) +``` + +"Package is the name of the package that is being requested." + +### fn spec.withPackagePullPolicy + +```ts +withPackagePullPolicy(packagePullPolicy) +``` + +"PackagePullPolicy defines the pull policy for the package.\nDefault is IfNotPresent." + +### fn spec.withPackagePullSecrets + +```ts +withPackagePullSecrets(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +### fn spec.withPackagePullSecretsMixin + +```ts +withPackagePullSecretsMixin(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevisionActivationPolicy + +```ts +withRevisionActivationPolicy(revisionActivationPolicy) +``` + +"RevisionActivationPolicy specifies how the package controller should\nupdate from one revision to the next. Options are Automatic or Manual.\nDefault is Automatic." + +### fn spec.withRevisionHistoryLimit + +```ts +withRevisionHistoryLimit(revisionHistoryLimit) +``` + +"RevisionHistoryLimit dictates how the package controller cleans up old\ninactive package revisions.\nDefaults to 1. Can be disabled by explicitly setting to 0." + +### fn spec.withSkipDependencyResolution + +```ts +withSkipDependencyResolution(skipDependencyResolution) +``` + +"SkipDependencyResolution indicates to the package manager whether to skip\nresolving dependencies for a package. Setting this value to true may have\nunintended consequences.\nDefault is false." + +## obj spec.packagePullSecrets + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +### fn spec.packagePullSecrets.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1/configurationRevision.md b/docs/crossplane/1.17/pkg/v1/configurationRevision.md new file mode 100644 index 0000000..6105d67 --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1/configurationRevision.md @@ -0,0 +1,318 @@ +--- +permalink: /crossplane/1.17/pkg/v1/configurationRevision/ +--- + +# pkg.v1.configurationRevision + +"A ConfigurationRevision represents a revision of a Configuration. Crossplane\ncreates new revisions when there are changes to a Configuration.\n\n\nCrossplane creates and manages ConfigurationRevision. Don't directly edit\nConfigurationRevisions." 
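+
+Revisions are derived from a parent Configuration, so for context here is a minimal sketch of declaring that parent with the `pkg.v1.configuration` builders documented above. The import path and the package reference are assumptions for illustration.
+
+```jsonnet
+// Hypothetical import path; adjust to wherever this generated library lives.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+local configuration = crossplane.pkg.v1.configuration;
+
+// Installing a Configuration package; Crossplane then creates the
+// ConfigurationRevisions described on this page.
+configuration.new('platform-ref-example')
++ configuration.spec.withPackage('xpkg.upbound.io/example-org/platform-ref-example:v0.1.0')
++ configuration.spec.withRevisionHistoryLimit(3)
+```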
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withCommonLabels(commonLabels)`](#fn-specwithcommonlabels) + * [`fn withCommonLabelsMixin(commonLabels)`](#fn-specwithcommonlabelsmixin) + * [`fn withDesiredState(desiredState)`](#fn-specwithdesiredstate) + * [`fn withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints)`](#fn-specwithignorecrossplaneconstraints) + * [`fn withImage(image)`](#fn-specwithimage) + * [`fn withPackagePullPolicy(packagePullPolicy)`](#fn-specwithpackagepullpolicy) + * [`fn withPackagePullSecrets(packagePullSecrets)`](#fn-specwithpackagepullsecrets) + * [`fn withPackagePullSecretsMixin(packagePullSecrets)`](#fn-specwithpackagepullsecretsmixin) + * [`fn withRevision(revision)`](#fn-specwithrevision) + * [`fn withSkipDependencyResolution(skipDependencyResolution)`](#fn-specwithskipdependencyresolution) + * [`obj spec.packagePullSecrets`](#obj-specpackagepullsecrets) + * [`fn withName(name)`](#fn-specpackagepullsecretswithname) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of ConfigurationRevision + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." 
+ +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"PackageRevisionSpec specifies the desired state of a PackageRevision." + +### fn spec.withCommonLabels + +```ts +withCommonLabels(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.withCommonLabelsMixin + +```ts +withCommonLabelsMixin(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.withDesiredState + +```ts +withDesiredState(desiredState) +``` + +"DesiredState of the PackageRevision. Can be either Active or Inactive." + +### fn spec.withIgnoreCrossplaneConstraints + +```ts +withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints) +``` + +"IgnoreCrossplaneConstraints indicates to the package manager whether to\nhonor Crossplane version constrains specified by the package.\nDefault is false." + +### fn spec.withImage + +```ts +withImage(image) +``` + +"Package image used by install Pod to extract package contents." + +### fn spec.withPackagePullPolicy + +```ts +withPackagePullPolicy(packagePullPolicy) +``` + +"PackagePullPolicy defines the pull policy for the package. It is also\napplied to any images pulled for the package, such as a provider's\ncontroller image.\nDefault is IfNotPresent." + +### fn spec.withPackagePullSecrets + +```ts +withPackagePullSecrets(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. 
They are also applied to\nany images pulled for the package, such as a provider's controller image." + +### fn spec.withPackagePullSecretsMixin + +```ts +withPackagePullSecretsMixin(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevision + +```ts +withRevision(revision) +``` + +"Revision number. Indicates when the revision will be garbage collected\nbased on the parent's RevisionHistoryLimit." + +### fn spec.withSkipDependencyResolution + +```ts +withSkipDependencyResolution(skipDependencyResolution) +``` + +"SkipDependencyResolution indicates to the package manager whether to skip\nresolving dependencies for a package. Setting this value to true may have\nunintended consequences.\nDefault is false." + +## obj spec.packagePullSecrets + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +### fn spec.packagePullSecrets.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1/function.md b/docs/crossplane/1.17/pkg/v1/function.md new file mode 100644 index 0000000..39fd0a2 --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1/function.md @@ -0,0 +1,364 @@ +--- +permalink: /crossplane/1.17/pkg/v1/function/ +--- + +# pkg.v1.function + +"A Function installs an OCI compatible Crossplane package, extending\nCrossplane with support for a new kind of composition function.\n\n\nRead the Crossplane documentation for\n[more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions)." 
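+
+A minimal usage sketch of these builders follows. The import path and the package reference are assumptions for illustration, and `function` is indexed with brackets because it is a reserved word in Jsonnet.
+
+```jsonnet
+// Hypothetical import path; adjust to wherever this generated library lives.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+local fn = crossplane.pkg.v1['function'];
+
+// Install a composition function package (name and version are illustrative).
+fn.new('function-example')
++ fn.spec.withPackage('xpkg.upbound.io/example-org/function-example:v0.1.0')
++ fn.spec.withPackagePullPolicy('IfNotPresent')
+```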
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withCommonLabels(commonLabels)`](#fn-specwithcommonlabels) + * [`fn withCommonLabelsMixin(commonLabels)`](#fn-specwithcommonlabelsmixin) + * [`fn withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints)`](#fn-specwithignorecrossplaneconstraints) + * [`fn withPackage(package)`](#fn-specwithpackage) + * [`fn withPackagePullPolicy(packagePullPolicy)`](#fn-specwithpackagepullpolicy) + * [`fn withPackagePullSecrets(packagePullSecrets)`](#fn-specwithpackagepullsecrets) + * [`fn withPackagePullSecretsMixin(packagePullSecrets)`](#fn-specwithpackagepullsecretsmixin) + * [`fn withRevisionActivationPolicy(revisionActivationPolicy)`](#fn-specwithrevisionactivationpolicy) + * [`fn withRevisionHistoryLimit(revisionHistoryLimit)`](#fn-specwithrevisionhistorylimit) + * [`fn withSkipDependencyResolution(skipDependencyResolution)`](#fn-specwithskipdependencyresolution) + * [`obj spec.controllerConfigRef`](#obj-speccontrollerconfigref) + * [`fn withName(name)`](#fn-speccontrollerconfigrefwithname) + * [`obj spec.packagePullSecrets`](#obj-specpackagepullsecrets) + * [`fn withName(name)`](#fn-specpackagepullsecretswithname) + * [`obj spec.runtimeConfigRef`](#obj-specruntimeconfigref) + * [`fn withApiVersion(apiVersion)`](#fn-specruntimeconfigrefwithapiversion) + * [`fn withKind(kind)`](#fn-specruntimeconfigrefwithkind) + * [`fn withName(name)`](#fn-specruntimeconfigrefwithname) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Function + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"FunctionSpec specifies the configuration of a Function." + +### fn spec.withCommonLabels + +```ts +withCommonLabels(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.withCommonLabelsMixin + +```ts +withCommonLabelsMixin(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.withIgnoreCrossplaneConstraints + +```ts +withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints) +``` + +"IgnoreCrossplaneConstraints indicates to the package manager whether to\nhonor Crossplane version constrains specified by the package.\nDefault is false." + +### fn spec.withPackage + +```ts +withPackage(package) +``` + +"Package is the name of the package that is being requested." + +### fn spec.withPackagePullPolicy + +```ts +withPackagePullPolicy(packagePullPolicy) +``` + +"PackagePullPolicy defines the pull policy for the package.\nDefault is IfNotPresent." + +### fn spec.withPackagePullSecrets + +```ts +withPackagePullSecrets(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." 
+ +### fn spec.withPackagePullSecretsMixin + +```ts +withPackagePullSecretsMixin(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevisionActivationPolicy + +```ts +withRevisionActivationPolicy(revisionActivationPolicy) +``` + +"RevisionActivationPolicy specifies how the package controller should\nupdate from one revision to the next. Options are Automatic or Manual.\nDefault is Automatic." + +### fn spec.withRevisionHistoryLimit + +```ts +withRevisionHistoryLimit(revisionHistoryLimit) +``` + +"RevisionHistoryLimit dictates how the package controller cleans up old\ninactive package revisions.\nDefaults to 1. Can be disabled by explicitly setting to 0." + +### fn spec.withSkipDependencyResolution + +```ts +withSkipDependencyResolution(skipDependencyResolution) +``` + +"SkipDependencyResolution indicates to the package manager whether to skip\nresolving dependencies for a package. Setting this value to true may have\nunintended consequences.\nDefault is false." + +## obj spec.controllerConfigRef + +"ControllerConfigRef references a ControllerConfig resource that will be\nused to configure the packaged controller Deployment.\nDeprecated: Use RuntimeConfigReference instead." + +### fn spec.controllerConfigRef.withName + +```ts +withName(name) +``` + +"Name of the ControllerConfig." + +## obj spec.packagePullSecrets + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +### fn spec.packagePullSecrets.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.runtimeConfigRef + +"RuntimeConfigRef references a RuntimeConfig resource that will be used\nto configure the package runtime." + +### fn spec.runtimeConfigRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"API version of the referent." + +### fn spec.runtimeConfigRef.withKind + +```ts +withKind(kind) +``` + +"Kind of the referent." + +### fn spec.runtimeConfigRef.withName + +```ts +withName(name) +``` + +"Name of the RuntimeConfig." \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1/functionRevision.md b/docs/crossplane/1.17/pkg/v1/functionRevision.md new file mode 100644 index 0000000..b6703dd --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1/functionRevision.md @@ -0,0 +1,382 @@ +--- +permalink: /crossplane/1.17/pkg/v1/functionRevision/ +--- + +# pkg.v1.functionRevision + +"A FunctionRevision represents a revision of a Function. Crossplane\ncreates new revisions when there are changes to the Function.\n\n\nCrossplane creates and manages FunctionRevisions. Don't directly edit\nFunctionRevisions." 
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withCommonLabels(commonLabels)`](#fn-specwithcommonlabels) + * [`fn withCommonLabelsMixin(commonLabels)`](#fn-specwithcommonlabelsmixin) + * [`fn withDesiredState(desiredState)`](#fn-specwithdesiredstate) + * [`fn withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints)`](#fn-specwithignorecrossplaneconstraints) + * [`fn withImage(image)`](#fn-specwithimage) + * [`fn withPackagePullPolicy(packagePullPolicy)`](#fn-specwithpackagepullpolicy) + * [`fn withPackagePullSecrets(packagePullSecrets)`](#fn-specwithpackagepullsecrets) + * [`fn withPackagePullSecretsMixin(packagePullSecrets)`](#fn-specwithpackagepullsecretsmixin) + * [`fn withRevision(revision)`](#fn-specwithrevision) + * [`fn withSkipDependencyResolution(skipDependencyResolution)`](#fn-specwithskipdependencyresolution) + * [`fn withTlsClientSecretName(tlsClientSecretName)`](#fn-specwithtlsclientsecretname) + * [`fn withTlsServerSecretName(tlsServerSecretName)`](#fn-specwithtlsserversecretname) + * [`obj spec.controllerConfigRef`](#obj-speccontrollerconfigref) + * [`fn withName(name)`](#fn-speccontrollerconfigrefwithname) + * [`obj spec.packagePullSecrets`](#obj-specpackagepullsecrets) + * [`fn withName(name)`](#fn-specpackagepullsecretswithname) + * [`obj spec.runtimeConfigRef`](#obj-specruntimeconfigref) + * [`fn withApiVersion(apiVersion)`](#fn-specruntimeconfigrefwithapiversion) + * [`fn withKind(kind)`](#fn-specruntimeconfigrefwithkind) + * [`fn withName(name)`](#fn-specruntimeconfigrefwithname) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of FunctionRevision + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"FunctionRevisionSpec specifies configuration for a FunctionRevision." + +### fn spec.withCommonLabels + +```ts +withCommonLabels(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.withCommonLabelsMixin + +```ts +withCommonLabelsMixin(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.withDesiredState + +```ts +withDesiredState(desiredState) +``` + +"DesiredState of the PackageRevision. Can be either Active or Inactive." + +### fn spec.withIgnoreCrossplaneConstraints + +```ts +withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints) +``` + +"IgnoreCrossplaneConstraints indicates to the package manager whether to\nhonor Crossplane version constrains specified by the package.\nDefault is false." + +### fn spec.withImage + +```ts +withImage(image) +``` + +"Package image used by install Pod to extract package contents." + +### fn spec.withPackagePullPolicy + +```ts +withPackagePullPolicy(packagePullPolicy) +``` + +"PackagePullPolicy defines the pull policy for the package. It is also\napplied to any images pulled for the package, such as a provider's\ncontroller image.\nDefault is IfNotPresent." 
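+
+The image, pull-policy, and desired-state setters above compose like any other builder in this library. Crossplane creates and manages FunctionRevisions itself, so the sketch below is illustrative only; the import path, the `crossplane.pkg.v1.functionRevision` field path, and the image reference are all assumptions:
+
+```jsonnet
+local crossplane = import 'crossplane.libsonnet';  // assumed import path
+local functionRevision = crossplane.pkg.v1.functionRevision;  // assumed field path
+
+// Illustrative only: Crossplane creates and manages FunctionRevisions.
+functionRevision.new('function-patch-and-transform-1234abc')  // hypothetical revision name
++ functionRevision.spec.withImage('xpkg.example.org/function-patch-and-transform:v0.2.1')  // hypothetical image
++ functionRevision.spec.withPackagePullPolicy('IfNotPresent')
++ functionRevision.spec.withDesiredState('Active')
++ functionRevision.spec.withRevision(1)
+```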
+ +### fn spec.withPackagePullSecrets + +```ts +withPackagePullSecrets(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +### fn spec.withPackagePullSecretsMixin + +```ts +withPackagePullSecretsMixin(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevision + +```ts +withRevision(revision) +``` + +"Revision number. Indicates when the revision will be garbage collected\nbased on the parent's RevisionHistoryLimit." + +### fn spec.withSkipDependencyResolution + +```ts +withSkipDependencyResolution(skipDependencyResolution) +``` + +"SkipDependencyResolution indicates to the package manager whether to skip\nresolving dependencies for a package. Setting this value to true may have\nunintended consequences.\nDefault is false." + +### fn spec.withTlsClientSecretName + +```ts +withTlsClientSecretName(tlsClientSecretName) +``` + +"TLSClientSecretName is the name of the TLS Secret that stores client\ncertificates of the Provider." + +### fn spec.withTlsServerSecretName + +```ts +withTlsServerSecretName(tlsServerSecretName) +``` + +"TLSServerSecretName is the name of the TLS Secret that stores server\ncertificates of the Provider." + +## obj spec.controllerConfigRef + +"ControllerConfigRef references a ControllerConfig resource that will be\nused to configure the packaged controller Deployment.\nDeprecated: Use RuntimeConfigReference instead." + +### fn spec.controllerConfigRef.withName + +```ts +withName(name) +``` + +"Name of the ControllerConfig." + +## obj spec.packagePullSecrets + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +### fn spec.packagePullSecrets.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.runtimeConfigRef + +"RuntimeConfigRef references a RuntimeConfig resource that will be used\nto configure the package runtime." + +### fn spec.runtimeConfigRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"API version of the referent." + +### fn spec.runtimeConfigRef.withKind + +```ts +withKind(kind) +``` + +"Kind of the referent." + +### fn spec.runtimeConfigRef.withName + +```ts +withName(name) +``` + +"Name of the RuntimeConfig." 
\ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1/index.md b/docs/crossplane/1.17/pkg/v1/index.md new file mode 100644 index 0000000..dca0f66 --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1/index.md @@ -0,0 +1,14 @@ +--- +permalink: /crossplane/1.17/pkg/v1/ +--- + +# pkg.v1 + + + +* [configuration](configuration.md) +* [configurationRevision](configurationRevision.md) +* [function](function.md) +* [functionRevision](functionRevision.md) +* [provider](provider.md) +* [providerRevision](providerRevision.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1/provider.md b/docs/crossplane/1.17/pkg/v1/provider.md new file mode 100644 index 0000000..e67c824 --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1/provider.md @@ -0,0 +1,364 @@ +--- +permalink: /crossplane/1.17/pkg/v1/provider/ +--- + +# pkg.v1.provider + +"A Provider installs an OCI compatible Crossplane package, extending\nCrossplane with support for new kinds of managed resources.\n\n\nRead the Crossplane documentation for\n[more information about Providers](https://docs.crossplane.io/latest/concepts/providers)." + +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withCommonLabels(commonLabels)`](#fn-specwithcommonlabels) + * [`fn withCommonLabelsMixin(commonLabels)`](#fn-specwithcommonlabelsmixin) + * [`fn withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints)`](#fn-specwithignorecrossplaneconstraints) + * [`fn withPackage(package)`](#fn-specwithpackage) + * [`fn withPackagePullPolicy(packagePullPolicy)`](#fn-specwithpackagepullpolicy) + * [`fn withPackagePullSecrets(packagePullSecrets)`](#fn-specwithpackagepullsecrets) + * [`fn withPackagePullSecretsMixin(packagePullSecrets)`](#fn-specwithpackagepullsecretsmixin) + * [`fn withRevisionActivationPolicy(revisionActivationPolicy)`](#fn-specwithrevisionactivationpolicy) + * [`fn withRevisionHistoryLimit(revisionHistoryLimit)`](#fn-specwithrevisionhistorylimit) + * [`fn withSkipDependencyResolution(skipDependencyResolution)`](#fn-specwithskipdependencyresolution) + * [`obj spec.controllerConfigRef`](#obj-speccontrollerconfigref) + * [`fn 
withName(name)`](#fn-speccontrollerconfigrefwithname) + * [`obj spec.packagePullSecrets`](#obj-specpackagepullsecrets) + * [`fn withName(name)`](#fn-specpackagepullsecretswithname) + * [`obj spec.runtimeConfigRef`](#obj-specruntimeconfigref) + * [`fn withApiVersion(apiVersion)`](#fn-specruntimeconfigrefwithapiversion) + * [`fn withKind(kind)`](#fn-specruntimeconfigrefwithkind) + * [`fn withName(name)`](#fn-specruntimeconfigrefwithname) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Provider + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"ProviderSpec specifies details about a request to install a provider to\nCrossplane." + +### fn spec.withCommonLabels + +```ts +withCommonLabels(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.withCommonLabelsMixin + +```ts +withCommonLabelsMixin(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. 
May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.withIgnoreCrossplaneConstraints + +```ts +withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints) +``` + +"IgnoreCrossplaneConstraints indicates to the package manager whether to\nhonor Crossplane version constrains specified by the package.\nDefault is false." + +### fn spec.withPackage + +```ts +withPackage(package) +``` + +"Package is the name of the package that is being requested." + +### fn spec.withPackagePullPolicy + +```ts +withPackagePullPolicy(packagePullPolicy) +``` + +"PackagePullPolicy defines the pull policy for the package.\nDefault is IfNotPresent." + +### fn spec.withPackagePullSecrets + +```ts +withPackagePullSecrets(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +### fn spec.withPackagePullSecretsMixin + +```ts +withPackagePullSecretsMixin(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevisionActivationPolicy + +```ts +withRevisionActivationPolicy(revisionActivationPolicy) +``` + +"RevisionActivationPolicy specifies how the package controller should\nupdate from one revision to the next. Options are Automatic or Manual.\nDefault is Automatic." + +### fn spec.withRevisionHistoryLimit + +```ts +withRevisionHistoryLimit(revisionHistoryLimit) +``` + +"RevisionHistoryLimit dictates how the package controller cleans up old\ninactive package revisions.\nDefaults to 1. Can be disabled by explicitly setting to 0." + +### fn spec.withSkipDependencyResolution + +```ts +withSkipDependencyResolution(skipDependencyResolution) +``` + +"SkipDependencyResolution indicates to the package manager whether to skip\nresolving dependencies for a package. Setting this value to true may have\nunintended consequences.\nDefault is false." + +## obj spec.controllerConfigRef + +"ControllerConfigRef references a ControllerConfig resource that will be\nused to configure the packaged controller Deployment.\nDeprecated: Use RuntimeConfigReference instead." + +### fn spec.controllerConfigRef.withName + +```ts +withName(name) +``` + +"Name of the ControllerConfig." + +## obj spec.packagePullSecrets + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +### fn spec.packagePullSecrets.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.runtimeConfigRef + +"RuntimeConfigRef references a RuntimeConfig resource that will be used\nto configure the package runtime." + +### fn spec.runtimeConfigRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"API version of the referent." + +### fn spec.runtimeConfigRef.withKind + +```ts +withKind(kind) +``` + +"Kind of the referent." + +### fn spec.runtimeConfigRef.withName + +```ts +withName(name) +``` + +"Name of the RuntimeConfig." 
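+
+Putting the spec setters above together, a complete Provider manifest can be built with this library. A minimal sketch, assuming the library is imported as `crossplane` and exposes this group as `crossplane.pkg.v1.provider`; the import path, package reference, and Secret name are assumptions:
+
+```jsonnet
+local crossplane = import 'crossplane.libsonnet';  // assumed import path
+local provider = crossplane.pkg.v1.provider;  // assumed field path
+
+provider.new('provider-aws')
++ provider.spec.withPackage('xpkg.example.org/provider-aws:v1.0.0')  // hypothetical package reference
++ provider.spec.withRevisionActivationPolicy('Automatic')
++ provider.spec.withRevisionHistoryLimit(3)
++ provider.spec.withPackagePullSecrets([
+    provider.spec.packagePullSecrets.withName('registry-creds'),  // hypothetical Secret name
+  ])
+```
+
+Rendering this with `jsonnet` (or whatever tooling your setup uses) produces a Provider object that can then be applied to the cluster.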
\ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1/providerRevision.md b/docs/crossplane/1.17/pkg/v1/providerRevision.md new file mode 100644 index 0000000..7a65c3e --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1/providerRevision.md @@ -0,0 +1,382 @@ +--- +permalink: /crossplane/1.17/pkg/v1/providerRevision/ +--- + +# pkg.v1.providerRevision + +"A ProviderRevision represents a revision of a Provider. Crossplane\ncreates new revisions when there are changes to a Provider.\n\n\nCrossplane creates and manages ProviderRevisions. Don't directly edit\nProviderRevisions." + +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withCommonLabels(commonLabels)`](#fn-specwithcommonlabels) + * [`fn withCommonLabelsMixin(commonLabels)`](#fn-specwithcommonlabelsmixin) + * [`fn withDesiredState(desiredState)`](#fn-specwithdesiredstate) + * [`fn withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints)`](#fn-specwithignorecrossplaneconstraints) + * [`fn withImage(image)`](#fn-specwithimage) + * [`fn withPackagePullPolicy(packagePullPolicy)`](#fn-specwithpackagepullpolicy) + * [`fn withPackagePullSecrets(packagePullSecrets)`](#fn-specwithpackagepullsecrets) + * [`fn withPackagePullSecretsMixin(packagePullSecrets)`](#fn-specwithpackagepullsecretsmixin) + * [`fn withRevision(revision)`](#fn-specwithrevision) + * [`fn withSkipDependencyResolution(skipDependencyResolution)`](#fn-specwithskipdependencyresolution) + * [`fn withTlsClientSecretName(tlsClientSecretName)`](#fn-specwithtlsclientsecretname) + * [`fn withTlsServerSecretName(tlsServerSecretName)`](#fn-specwithtlsserversecretname) + * [`obj spec.controllerConfigRef`](#obj-speccontrollerconfigref) + * [`fn withName(name)`](#fn-speccontrollerconfigrefwithname) + * [`obj spec.packagePullSecrets`](#obj-specpackagepullsecrets) + * [`fn withName(name)`](#fn-specpackagepullsecretswithname) + * [`obj spec.runtimeConfigRef`](#obj-specruntimeconfigref) + * [`fn withApiVersion(apiVersion)`](#fn-specruntimeconfigrefwithapiversion) + * [`fn withKind(kind)`](#fn-specruntimeconfigrefwithkind) + * [`fn 
withName(name)`](#fn-specruntimeconfigrefwithname) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of ProviderRevision + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. 
If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"ProviderRevisionSpec specifies configuration for a ProviderRevision." + +### fn spec.withCommonLabels + +```ts +withCommonLabels(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.withCommonLabelsMixin + +```ts +withCommonLabelsMixin(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.withDesiredState + +```ts +withDesiredState(desiredState) +``` + +"DesiredState of the PackageRevision. Can be either Active or Inactive." 
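+
+The DesiredState setter controls whether a revision is Active or Inactive. Crossplane creates and manages ProviderRevisions, so the sketch below is illustrative only; the import path, the `crossplane.pkg.v1.providerRevision` field path, and the revision name are assumptions:
+
+```jsonnet
+local crossplane = import 'crossplane.libsonnet';  // assumed import path
+local providerRevision = crossplane.pkg.v1.providerRevision;  // assumed field path
+
+// Illustrative only: mark a superseded revision Inactive so its controller no longer runs.
+providerRevision.new('provider-aws-1a2b3c4d')  // hypothetical revision name
++ providerRevision.spec.withDesiredState('Inactive')
++ providerRevision.spec.withRevision(2)
+```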
+ +### fn spec.withIgnoreCrossplaneConstraints + +```ts +withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints) +``` + +"IgnoreCrossplaneConstraints indicates to the package manager whether to\nhonor Crossplane version constrains specified by the package.\nDefault is false." + +### fn spec.withImage + +```ts +withImage(image) +``` + +"Package image used by install Pod to extract package contents." + +### fn spec.withPackagePullPolicy + +```ts +withPackagePullPolicy(packagePullPolicy) +``` + +"PackagePullPolicy defines the pull policy for the package. It is also\napplied to any images pulled for the package, such as a provider's\ncontroller image.\nDefault is IfNotPresent." + +### fn spec.withPackagePullSecrets + +```ts +withPackagePullSecrets(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +### fn spec.withPackagePullSecretsMixin + +```ts +withPackagePullSecretsMixin(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevision + +```ts +withRevision(revision) +``` + +"Revision number. Indicates when the revision will be garbage collected\nbased on the parent's RevisionHistoryLimit." + +### fn spec.withSkipDependencyResolution + +```ts +withSkipDependencyResolution(skipDependencyResolution) +``` + +"SkipDependencyResolution indicates to the package manager whether to skip\nresolving dependencies for a package. Setting this value to true may have\nunintended consequences.\nDefault is false." + +### fn spec.withTlsClientSecretName + +```ts +withTlsClientSecretName(tlsClientSecretName) +``` + +"TLSClientSecretName is the name of the TLS Secret that stores client\ncertificates of the Provider." + +### fn spec.withTlsServerSecretName + +```ts +withTlsServerSecretName(tlsServerSecretName) +``` + +"TLSServerSecretName is the name of the TLS Secret that stores server\ncertificates of the Provider." + +## obj spec.controllerConfigRef + +"ControllerConfigRef references a ControllerConfig resource that will be\nused to configure the packaged controller Deployment.\nDeprecated: Use RuntimeConfigReference instead." + +### fn spec.controllerConfigRef.withName + +```ts +withName(name) +``` + +"Name of the ControllerConfig." + +## obj spec.packagePullSecrets + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +### fn spec.packagePullSecrets.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.runtimeConfigRef + +"RuntimeConfigRef references a RuntimeConfig resource that will be used\nto configure the package runtime." + +### fn spec.runtimeConfigRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"API version of the referent." + +### fn spec.runtimeConfigRef.withKind + +```ts +withKind(kind) +``` + +"Kind of the referent." 
+ +### fn spec.runtimeConfigRef.withName + +```ts +withName(name) +``` + +"Name of the RuntimeConfig." \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1alpha1/controllerConfig.md b/docs/crossplane/1.17/pkg/v1alpha1/controllerConfig.md new file mode 100644 index 0000000..2c114f2 --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1alpha1/controllerConfig.md @@ -0,0 +1,5213 @@ +--- +permalink: /crossplane/1.17/pkg/v1alpha1/controllerConfig/ +--- + +# pkg.v1alpha1.controllerConfig + +"A ControllerConfig applies settings to controllers like Provider pods.\nDeprecated: Use the\n[DeploymentRuntimeConfig](https://docs.crossplane.io/latest/concepts/providers#runtime-configuration)\ninstead.\n\n\nRead the\n[Package Runtime Configuration](https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md)\ndesign document for more details." + +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withArgs(args)`](#fn-specwithargs) + * [`fn withArgsMixin(args)`](#fn-specwithargsmixin) + * [`fn withEnv(env)`](#fn-specwithenv) + * [`fn withEnvFrom(envFrom)`](#fn-specwithenvfrom) + * [`fn withEnvFromMixin(envFrom)`](#fn-specwithenvfrommixin) + * [`fn withEnvMixin(env)`](#fn-specwithenvmixin) + * [`fn withImage(image)`](#fn-specwithimage) + * [`fn withImagePullPolicy(imagePullPolicy)`](#fn-specwithimagepullpolicy) + * [`fn withImagePullSecrets(imagePullSecrets)`](#fn-specwithimagepullsecrets) + * [`fn withImagePullSecretsMixin(imagePullSecrets)`](#fn-specwithimagepullsecretsmixin) + * [`fn withNodeName(nodeName)`](#fn-specwithnodename) + * [`fn withNodeSelector(nodeSelector)`](#fn-specwithnodeselector) + * [`fn withNodeSelectorMixin(nodeSelector)`](#fn-specwithnodeselectormixin) + * [`fn withPorts(ports)`](#fn-specwithports) + * [`fn withPortsMixin(ports)`](#fn-specwithportsmixin) + * [`fn withPriorityClassName(priorityClassName)`](#fn-specwithpriorityclassname) + * [`fn withReplicas(replicas)`](#fn-specwithreplicas) + * [`fn withRuntimeClassName(runtimeClassName)`](#fn-specwithruntimeclassname) + * [`fn 
withServiceAccountName(serviceAccountName)`](#fn-specwithserviceaccountname) + * [`fn withTolerations(tolerations)`](#fn-specwithtolerations) + * [`fn withTolerationsMixin(tolerations)`](#fn-specwithtolerationsmixin) + * [`fn withVolumeMounts(volumeMounts)`](#fn-specwithvolumemounts) + * [`fn withVolumeMountsMixin(volumeMounts)`](#fn-specwithvolumemountsmixin) + * [`fn withVolumes(volumes)`](#fn-specwithvolumes) + * [`fn withVolumesMixin(volumes)`](#fn-specwithvolumesmixin) + * [`obj spec.affinity`](#obj-specaffinity) + * [`obj spec.affinity.nodeAffinity`](#obj-specaffinitynodeaffinity) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitynodeaffinitywithpreferredduringschedulingignoredduringexecution) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitynodeaffinitywithpreferredduringschedulingignoredduringexecutionmixin) + * [`obj spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution`](#obj-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecution) + * [`fn withWeight(weight)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionwithweight) + * [`obj spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference`](#obj-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreference) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencewithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencewithmatchexpressionsmixin) + * [`fn withMatchFields(matchFields)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencewithmatchfields) + * [`fn withMatchFieldsMixin(matchFields)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencewithmatchfieldsmixin) + * [`obj spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions`](#obj-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressions) + * [`fn withKey(key)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressionswithvaluesmixin) + * [`obj spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields`](#obj-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfields) + * [`fn withKey(key)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfieldswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfieldswithoperator) + * [`fn withValues(values)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfieldswithvalues) + * [`fn 
withValuesMixin(values)`](#fn-specaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfieldswithvaluesmixin) + * [`obj spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution`](#obj-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecution) + * [`fn withNodeSelectorTerms(nodeSelectorTerms)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionwithnodeselectorterms) + * [`fn withNodeSelectorTermsMixin(nodeSelectorTerms)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionwithnodeselectortermsmixin) + * [`obj spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms`](#obj-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectorterms) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermswithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermswithmatchexpressionsmixin) + * [`fn withMatchFields(matchFields)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermswithmatchfields) + * [`fn withMatchFieldsMixin(matchFields)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermswithmatchfieldsmixin) + * [`obj spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions`](#obj-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressions) + * [`fn withKey(key)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressionswithvaluesmixin) + * [`obj spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields`](#obj-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfields) + * [`fn withKey(key)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfieldswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfieldswithoperator) + * [`fn withValues(values)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfieldswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfieldswithvaluesmixin) + * [`obj spec.affinity.podAffinity`](#obj-specaffinitypodaffinity) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitypodaffinitywithpreferredduringschedulingignoredduringexecution) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitypodaffinitywithpreferredduringschedulingignoredduringexecutionmixin) + * 
[`fn withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitypodaffinitywithrequiredduringschedulingignoredduringexecution) + * [`fn withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitypodaffinitywithrequiredduringschedulingignoredduringexecutionmixin) + * [`obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution`](#obj-specaffinitypodaffinitypreferredduringschedulingignoredduringexecution) + * [`fn withWeight(weight)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionwithweight) + * [`obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm`](#obj-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinityterm) + * [`fn withMatchLabelKeys(matchLabelKeys)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmatchlabelkeys) + * [`fn withMatchLabelKeysMixin(matchLabelKeys)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmatchlabelkeysmixin) + * [`fn withMismatchLabelKeys(mismatchLabelKeys)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmismatchlabelkeys) + * [`fn withMismatchLabelKeysMixin(mismatchLabelKeys)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmismatchlabelkeysmixin) + * [`fn withNamespaces(namespaces)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithnamespaces) + * [`fn withNamespacesMixin(namespaces)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithnamespacesmixin) + * [`fn withTopologyKey(topologyKey)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithtopologykey) + * [`obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector`](#obj-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchlabelsmixin) + * [`obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions`](#obj-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithoperator) + * [`fn 
withValues(values)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector`](#obj-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchlabelsmixin) + * [`obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions`](#obj-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressions) + * [`fn withKey(key)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithvaluesmixin) + * [`obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution`](#obj-specaffinitypodaffinityrequiredduringschedulingignoredduringexecution) + * [`fn withMatchLabelKeys(matchLabelKeys)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithmatchlabelkeys) + * [`fn withMatchLabelKeysMixin(matchLabelKeys)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithmatchlabelkeysmixin) + * [`fn withMismatchLabelKeys(mismatchLabelKeys)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithmismatchlabelkeys) + * [`fn withMismatchLabelKeysMixin(mismatchLabelKeys)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithmismatchlabelkeysmixin) + * [`fn withNamespaces(namespaces)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithnamespaces) + * [`fn withNamespacesMixin(namespaces)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithnamespacesmixin) + * [`fn withTopologyKey(topologyKey)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithtopologykey) + * [`obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector`](#obj-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselector) + * [`fn 
withMatchExpressions(matchExpressions)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchlabelsmixin) + * [`obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions`](#obj-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector`](#obj-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchlabelsmixin) + * [`obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions`](#obj-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressions) + * [`fn withKey(key)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithvaluesmixin) + * [`obj spec.affinity.podAntiAffinity`](#obj-specaffinitypodantiaffinity) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitypodantiaffinitywithpreferredduringschedulingignoredduringexecution) + * [`fn 
withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitypodantiaffinitywithpreferredduringschedulingignoredduringexecutionmixin) + * [`fn withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitypodantiaffinitywithrequiredduringschedulingignoredduringexecution) + * [`fn withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution)`](#fn-specaffinitypodantiaffinitywithrequiredduringschedulingignoredduringexecutionmixin) + * [`obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution`](#obj-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecution) + * [`fn withWeight(weight)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionwithweight) + * [`obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm`](#obj-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinityterm) + * [`fn withMatchLabelKeys(matchLabelKeys)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmatchlabelkeys) + * [`fn withMatchLabelKeysMixin(matchLabelKeys)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmatchlabelkeysmixin) + * [`fn withMismatchLabelKeys(mismatchLabelKeys)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmismatchlabelkeys) + * [`fn withMismatchLabelKeysMixin(mismatchLabelKeys)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmismatchlabelkeysmixin) + * [`fn withNamespaces(namespaces)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithnamespaces) + * [`fn withNamespacesMixin(namespaces)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithnamespacesmixin) + * [`fn withTopologyKey(topologyKey)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithtopologykey) + * [`obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector`](#obj-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchlabelsmixin) + * [`obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions`](#obj-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressions) + * [`fn 
withKey(key)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector`](#obj-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchlabelsmixin) + * [`obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions`](#obj-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressions) + * [`fn withKey(key)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithvaluesmixin) + * [`obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution`](#obj-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecution) + * [`fn withMatchLabelKeys(matchLabelKeys)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithmatchlabelkeys) + * [`fn withMatchLabelKeysMixin(matchLabelKeys)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithmatchlabelkeysmixin) + * [`fn withMismatchLabelKeys(mismatchLabelKeys)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithmismatchlabelkeys) + * [`fn withMismatchLabelKeysMixin(mismatchLabelKeys)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithmismatchlabelkeysmixin) + * [`fn withNamespaces(namespaces)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithnamespaces) + * [`fn 
withNamespacesMixin(namespaces)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithnamespacesmixin) + * [`fn withTopologyKey(topologyKey)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithtopologykey) + * [`obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector`](#obj-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchlabelsmixin) + * [`obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions`](#obj-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector`](#obj-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchlabelsmixin) + * [`obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions`](#obj-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressions) + * [`fn withKey(key)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithvalues) + * [`fn 
withValuesMixin(values)`](#fn-specaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithvaluesmixin) + * [`obj spec.env`](#obj-specenv) + * [`fn withName(name)`](#fn-specenvwithname) + * [`fn withValue(value)`](#fn-specenvwithvalue) + * [`obj spec.env.valueFrom`](#obj-specenvvaluefrom) + * [`obj spec.env.valueFrom.configMapKeyRef`](#obj-specenvvaluefromconfigmapkeyref) + * [`fn withKey(key)`](#fn-specenvvaluefromconfigmapkeyrefwithkey) + * [`fn withName(name)`](#fn-specenvvaluefromconfigmapkeyrefwithname) + * [`fn withOptional(optional)`](#fn-specenvvaluefromconfigmapkeyrefwithoptional) + * [`obj spec.env.valueFrom.fieldRef`](#obj-specenvvaluefromfieldref) + * [`fn withApiVersion(apiVersion)`](#fn-specenvvaluefromfieldrefwithapiversion) + * [`fn withFieldPath(fieldPath)`](#fn-specenvvaluefromfieldrefwithfieldpath) + * [`obj spec.env.valueFrom.resourceFieldRef`](#obj-specenvvaluefromresourcefieldref) + * [`fn withContainerName(containerName)`](#fn-specenvvaluefromresourcefieldrefwithcontainername) + * [`fn withDivisor(divisor)`](#fn-specenvvaluefromresourcefieldrefwithdivisor) + * [`fn withResource(resource)`](#fn-specenvvaluefromresourcefieldrefwithresource) + * [`obj spec.env.valueFrom.secretKeyRef`](#obj-specenvvaluefromsecretkeyref) + * [`fn withKey(key)`](#fn-specenvvaluefromsecretkeyrefwithkey) + * [`fn withName(name)`](#fn-specenvvaluefromsecretkeyrefwithname) + * [`fn withOptional(optional)`](#fn-specenvvaluefromsecretkeyrefwithoptional) + * [`obj spec.envFrom`](#obj-specenvfrom) + * [`fn withPrefix(prefix)`](#fn-specenvfromwithprefix) + * [`obj spec.envFrom.configMapRef`](#obj-specenvfromconfigmapref) + * [`fn withName(name)`](#fn-specenvfromconfigmaprefwithname) + * [`fn withOptional(optional)`](#fn-specenvfromconfigmaprefwithoptional) + * [`obj spec.envFrom.secretRef`](#obj-specenvfromsecretref) + * [`fn withName(name)`](#fn-specenvfromsecretrefwithname) + * [`fn withOptional(optional)`](#fn-specenvfromsecretrefwithoptional) + * [`obj spec.imagePullSecrets`](#obj-specimagepullsecrets) + * [`fn withName(name)`](#fn-specimagepullsecretswithname) + * [`obj spec.metadata`](#obj-specmetadata) + * [`fn withAnnotations(annotations)`](#fn-specmetadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-specmetadatawithannotationsmixin) + * [`fn withLabels(labels)`](#fn-specmetadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-specmetadatawithlabelsmixin) + * [`obj spec.podSecurityContext`](#obj-specpodsecuritycontext) + * [`fn withFsGroup(fsGroup)`](#fn-specpodsecuritycontextwithfsgroup) + * [`fn withFsGroupChangePolicy(fsGroupChangePolicy)`](#fn-specpodsecuritycontextwithfsgroupchangepolicy) + * [`fn withRunAsGroup(runAsGroup)`](#fn-specpodsecuritycontextwithrunasgroup) + * [`fn withRunAsNonRoot(runAsNonRoot)`](#fn-specpodsecuritycontextwithrunasnonroot) + * [`fn withRunAsUser(runAsUser)`](#fn-specpodsecuritycontextwithrunasuser) + * [`fn withSupplementalGroups(supplementalGroups)`](#fn-specpodsecuritycontextwithsupplementalgroups) + * [`fn withSupplementalGroupsMixin(supplementalGroups)`](#fn-specpodsecuritycontextwithsupplementalgroupsmixin) + * [`fn withSysctls(sysctls)`](#fn-specpodsecuritycontextwithsysctls) + * [`fn withSysctlsMixin(sysctls)`](#fn-specpodsecuritycontextwithsysctlsmixin) + * [`obj spec.podSecurityContext.appArmorProfile`](#obj-specpodsecuritycontextapparmorprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specpodsecuritycontextapparmorprofilewithlocalhostprofile) + * [`fn 
withType(type)`](#fn-specpodsecuritycontextapparmorprofilewithtype) + * [`obj spec.podSecurityContext.seLinuxOptions`](#obj-specpodsecuritycontextselinuxoptions) + * [`fn withLevel(level)`](#fn-specpodsecuritycontextselinuxoptionswithlevel) + * [`fn withRole(role)`](#fn-specpodsecuritycontextselinuxoptionswithrole) + * [`fn withType(type)`](#fn-specpodsecuritycontextselinuxoptionswithtype) + * [`fn withUser(user)`](#fn-specpodsecuritycontextselinuxoptionswithuser) + * [`obj spec.podSecurityContext.seccompProfile`](#obj-specpodsecuritycontextseccompprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specpodsecuritycontextseccompprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specpodsecuritycontextseccompprofilewithtype) + * [`obj spec.podSecurityContext.sysctls`](#obj-specpodsecuritycontextsysctls) + * [`fn withName(name)`](#fn-specpodsecuritycontextsysctlswithname) + * [`fn withValue(value)`](#fn-specpodsecuritycontextsysctlswithvalue) + * [`obj spec.podSecurityContext.windowsOptions`](#obj-specpodsecuritycontextwindowsoptions) + * [`fn withGmsaCredentialSpec(gmsaCredentialSpec)`](#fn-specpodsecuritycontextwindowsoptionswithgmsacredentialspec) + * [`fn withGmsaCredentialSpecName(gmsaCredentialSpecName)`](#fn-specpodsecuritycontextwindowsoptionswithgmsacredentialspecname) + * [`fn withHostProcess(hostProcess)`](#fn-specpodsecuritycontextwindowsoptionswithhostprocess) + * [`fn withRunAsUserName(runAsUserName)`](#fn-specpodsecuritycontextwindowsoptionswithrunasusername) + * [`obj spec.ports`](#obj-specports) + * [`fn withContainerPort(containerPort)`](#fn-specportswithcontainerport) + * [`fn withHostIP(hostIP)`](#fn-specportswithhostip) + * [`fn withHostPort(hostPort)`](#fn-specportswithhostport) + * [`fn withName(name)`](#fn-specportswithname) + * [`fn withProtocol(protocol)`](#fn-specportswithprotocol) + * [`obj spec.resources`](#obj-specresources) + * [`fn withClaims(claims)`](#fn-specresourceswithclaims) + * [`fn withClaimsMixin(claims)`](#fn-specresourceswithclaimsmixin) + * [`fn withLimits(limits)`](#fn-specresourceswithlimits) + * [`fn withLimitsMixin(limits)`](#fn-specresourceswithlimitsmixin) + * [`fn withRequests(requests)`](#fn-specresourceswithrequests) + * [`fn withRequestsMixin(requests)`](#fn-specresourceswithrequestsmixin) + * [`obj spec.resources.claims`](#obj-specresourcesclaims) + * [`fn withName(name)`](#fn-specresourcesclaimswithname) + * [`obj spec.securityContext`](#obj-specsecuritycontext) + * [`fn withAllowPrivilegeEscalation(allowPrivilegeEscalation)`](#fn-specsecuritycontextwithallowprivilegeescalation) + * [`fn withPrivileged(privileged)`](#fn-specsecuritycontextwithprivileged) + * [`fn withProcMount(procMount)`](#fn-specsecuritycontextwithprocmount) + * [`fn withReadOnlyRootFilesystem(readOnlyRootFilesystem)`](#fn-specsecuritycontextwithreadonlyrootfilesystem) + * [`fn withRunAsGroup(runAsGroup)`](#fn-specsecuritycontextwithrunasgroup) + * [`fn withRunAsNonRoot(runAsNonRoot)`](#fn-specsecuritycontextwithrunasnonroot) + * [`fn withRunAsUser(runAsUser)`](#fn-specsecuritycontextwithrunasuser) + * [`obj spec.securityContext.appArmorProfile`](#obj-specsecuritycontextapparmorprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specsecuritycontextapparmorprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specsecuritycontextapparmorprofilewithtype) + * [`obj spec.securityContext.capabilities`](#obj-specsecuritycontextcapabilities) + * [`fn withAdd(add)`](#fn-specsecuritycontextcapabilitieswithadd) + * [`fn 
withAddMixin(add)`](#fn-specsecuritycontextcapabilitieswithaddmixin) + * [`fn withDrop(drop)`](#fn-specsecuritycontextcapabilitieswithdrop) + * [`fn withDropMixin(drop)`](#fn-specsecuritycontextcapabilitieswithdropmixin) + * [`obj spec.securityContext.seLinuxOptions`](#obj-specsecuritycontextselinuxoptions) + * [`fn withLevel(level)`](#fn-specsecuritycontextselinuxoptionswithlevel) + * [`fn withRole(role)`](#fn-specsecuritycontextselinuxoptionswithrole) + * [`fn withType(type)`](#fn-specsecuritycontextselinuxoptionswithtype) + * [`fn withUser(user)`](#fn-specsecuritycontextselinuxoptionswithuser) + * [`obj spec.securityContext.seccompProfile`](#obj-specsecuritycontextseccompprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specsecuritycontextseccompprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specsecuritycontextseccompprofilewithtype) + * [`obj spec.securityContext.windowsOptions`](#obj-specsecuritycontextwindowsoptions) + * [`fn withGmsaCredentialSpec(gmsaCredentialSpec)`](#fn-specsecuritycontextwindowsoptionswithgmsacredentialspec) + * [`fn withGmsaCredentialSpecName(gmsaCredentialSpecName)`](#fn-specsecuritycontextwindowsoptionswithgmsacredentialspecname) + * [`fn withHostProcess(hostProcess)`](#fn-specsecuritycontextwindowsoptionswithhostprocess) + * [`fn withRunAsUserName(runAsUserName)`](#fn-specsecuritycontextwindowsoptionswithrunasusername) + * [`obj spec.tolerations`](#obj-spectolerations) + * [`fn withEffect(effect)`](#fn-spectolerationswitheffect) + * [`fn withKey(key)`](#fn-spectolerationswithkey) + * [`fn withOperator(operator)`](#fn-spectolerationswithoperator) + * [`fn withTolerationSeconds(tolerationSeconds)`](#fn-spectolerationswithtolerationseconds) + * [`fn withValue(value)`](#fn-spectolerationswithvalue) + * [`obj spec.volumeMounts`](#obj-specvolumemounts) + * [`fn withMountPath(mountPath)`](#fn-specvolumemountswithmountpath) + * [`fn withMountPropagation(mountPropagation)`](#fn-specvolumemountswithmountpropagation) + * [`fn withName(name)`](#fn-specvolumemountswithname) + * [`fn withReadOnly(readOnly)`](#fn-specvolumemountswithreadonly) + * [`fn withRecursiveReadOnly(recursiveReadOnly)`](#fn-specvolumemountswithrecursivereadonly) + * [`fn withSubPath(subPath)`](#fn-specvolumemountswithsubpath) + * [`fn withSubPathExpr(subPathExpr)`](#fn-specvolumemountswithsubpathexpr) + * [`obj spec.volumes`](#obj-specvolumes) + * [`fn withName(name)`](#fn-specvolumeswithname) + * [`obj spec.volumes.awsElasticBlockStore`](#obj-specvolumesawselasticblockstore) + * [`fn withFsType(fsType)`](#fn-specvolumesawselasticblockstorewithfstype) + * [`fn withPartition(partition)`](#fn-specvolumesawselasticblockstorewithpartition) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesawselasticblockstorewithreadonly) + * [`fn withVolumeID(volumeID)`](#fn-specvolumesawselasticblockstorewithvolumeid) + * [`obj spec.volumes.azureDisk`](#obj-specvolumesazuredisk) + * [`fn withCachingMode(cachingMode)`](#fn-specvolumesazurediskwithcachingmode) + * [`fn withDiskName(diskName)`](#fn-specvolumesazurediskwithdiskname) + * [`fn withDiskURI(diskURI)`](#fn-specvolumesazurediskwithdiskuri) + * [`fn withFsType(fsType)`](#fn-specvolumesazurediskwithfstype) + * [`fn withKind(kind)`](#fn-specvolumesazurediskwithkind) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesazurediskwithreadonly) + * [`obj spec.volumes.azureFile`](#obj-specvolumesazurefile) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesazurefilewithreadonly) + * [`fn 
withSecretName(secretName)`](#fn-specvolumesazurefilewithsecretname) + * [`fn withShareName(shareName)`](#fn-specvolumesazurefilewithsharename) + * [`obj spec.volumes.cephfs`](#obj-specvolumescephfs) + * [`fn withMonitors(monitors)`](#fn-specvolumescephfswithmonitors) + * [`fn withMonitorsMixin(monitors)`](#fn-specvolumescephfswithmonitorsmixin) + * [`fn withPath(path)`](#fn-specvolumescephfswithpath) + * [`fn withReadOnly(readOnly)`](#fn-specvolumescephfswithreadonly) + * [`fn withSecretFile(secretFile)`](#fn-specvolumescephfswithsecretfile) + * [`fn withUser(user)`](#fn-specvolumescephfswithuser) + * [`obj spec.volumes.cephfs.secretRef`](#obj-specvolumescephfssecretref) + * [`fn withName(name)`](#fn-specvolumescephfssecretrefwithname) + * [`obj spec.volumes.cinder`](#obj-specvolumescinder) + * [`fn withFsType(fsType)`](#fn-specvolumescinderwithfstype) + * [`fn withReadOnly(readOnly)`](#fn-specvolumescinderwithreadonly) + * [`fn withVolumeID(volumeID)`](#fn-specvolumescinderwithvolumeid) + * [`obj spec.volumes.cinder.secretRef`](#obj-specvolumescindersecretref) + * [`fn withName(name)`](#fn-specvolumescindersecretrefwithname) + * [`obj spec.volumes.configMap`](#obj-specvolumesconfigmap) + * [`fn withDefaultMode(defaultMode)`](#fn-specvolumesconfigmapwithdefaultmode) + * [`fn withItems(items)`](#fn-specvolumesconfigmapwithitems) + * [`fn withItemsMixin(items)`](#fn-specvolumesconfigmapwithitemsmixin) + * [`fn withName(name)`](#fn-specvolumesconfigmapwithname) + * [`fn withOptional(optional)`](#fn-specvolumesconfigmapwithoptional) + * [`obj spec.volumes.configMap.items`](#obj-specvolumesconfigmapitems) + * [`fn withKey(key)`](#fn-specvolumesconfigmapitemswithkey) + * [`fn withMode(mode)`](#fn-specvolumesconfigmapitemswithmode) + * [`fn withPath(path)`](#fn-specvolumesconfigmapitemswithpath) + * [`obj spec.volumes.csi`](#obj-specvolumescsi) + * [`fn withDriver(driver)`](#fn-specvolumescsiwithdriver) + * [`fn withFsType(fsType)`](#fn-specvolumescsiwithfstype) + * [`fn withReadOnly(readOnly)`](#fn-specvolumescsiwithreadonly) + * [`fn withVolumeAttributes(volumeAttributes)`](#fn-specvolumescsiwithvolumeattributes) + * [`fn withVolumeAttributesMixin(volumeAttributes)`](#fn-specvolumescsiwithvolumeattributesmixin) + * [`obj spec.volumes.csi.nodePublishSecretRef`](#obj-specvolumescsinodepublishsecretref) + * [`fn withName(name)`](#fn-specvolumescsinodepublishsecretrefwithname) + * [`obj spec.volumes.downwardAPI`](#obj-specvolumesdownwardapi) + * [`fn withDefaultMode(defaultMode)`](#fn-specvolumesdownwardapiwithdefaultmode) + * [`fn withItems(items)`](#fn-specvolumesdownwardapiwithitems) + * [`fn withItemsMixin(items)`](#fn-specvolumesdownwardapiwithitemsmixin) + * [`obj spec.volumes.downwardAPI.items`](#obj-specvolumesdownwardapiitems) + * [`fn withMode(mode)`](#fn-specvolumesdownwardapiitemswithmode) + * [`fn withPath(path)`](#fn-specvolumesdownwardapiitemswithpath) + * [`obj spec.volumes.downwardAPI.items.fieldRef`](#obj-specvolumesdownwardapiitemsfieldref) + * [`fn withApiVersion(apiVersion)`](#fn-specvolumesdownwardapiitemsfieldrefwithapiversion) + * [`fn withFieldPath(fieldPath)`](#fn-specvolumesdownwardapiitemsfieldrefwithfieldpath) + * [`obj spec.volumes.downwardAPI.items.resourceFieldRef`](#obj-specvolumesdownwardapiitemsresourcefieldref) + * [`fn withContainerName(containerName)`](#fn-specvolumesdownwardapiitemsresourcefieldrefwithcontainername) + * [`fn withDivisor(divisor)`](#fn-specvolumesdownwardapiitemsresourcefieldrefwithdivisor) + * [`fn 
withResource(resource)`](#fn-specvolumesdownwardapiitemsresourcefieldrefwithresource) + * [`obj spec.volumes.emptyDir`](#obj-specvolumesemptydir) + * [`fn withMedium(medium)`](#fn-specvolumesemptydirwithmedium) + * [`fn withSizeLimit(sizeLimit)`](#fn-specvolumesemptydirwithsizelimit) + * [`obj spec.volumes.ephemeral`](#obj-specvolumesephemeral) + * [`obj spec.volumes.ephemeral.volumeClaimTemplate`](#obj-specvolumesephemeralvolumeclaimtemplate) + * [`obj spec.volumes.ephemeral.volumeClaimTemplate.metadata`](#obj-specvolumesephemeralvolumeclaimtemplatemetadata) + * [`fn withAnnotations(annotations)`](#fn-specvolumesephemeralvolumeclaimtemplatemetadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-specvolumesephemeralvolumeclaimtemplatemetadatawithannotationsmixin) + * [`fn withFinalizers(finalizers)`](#fn-specvolumesephemeralvolumeclaimtemplatemetadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-specvolumesephemeralvolumeclaimtemplatemetadatawithfinalizersmixin) + * [`fn withLabels(labels)`](#fn-specvolumesephemeralvolumeclaimtemplatemetadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-specvolumesephemeralvolumeclaimtemplatemetadatawithlabelsmixin) + * [`fn withName(name)`](#fn-specvolumesephemeralvolumeclaimtemplatemetadatawithname) + * [`fn withNamespace(namespace)`](#fn-specvolumesephemeralvolumeclaimtemplatemetadatawithnamespace) + * [`obj spec.volumes.ephemeral.volumeClaimTemplate.spec`](#obj-specvolumesephemeralvolumeclaimtemplatespec) + * [`fn withAccessModes(accessModes)`](#fn-specvolumesephemeralvolumeclaimtemplatespecwithaccessmodes) + * [`fn withAccessModesMixin(accessModes)`](#fn-specvolumesephemeralvolumeclaimtemplatespecwithaccessmodesmixin) + * [`fn withStorageClassName(storageClassName)`](#fn-specvolumesephemeralvolumeclaimtemplatespecwithstorageclassname) + * [`fn withVolumeAttributesClassName(volumeAttributesClassName)`](#fn-specvolumesephemeralvolumeclaimtemplatespecwithvolumeattributesclassname) + * [`fn withVolumeMode(volumeMode)`](#fn-specvolumesephemeralvolumeclaimtemplatespecwithvolumemode) + * [`fn withVolumeName(volumeName)`](#fn-specvolumesephemeralvolumeclaimtemplatespecwithvolumename) + * [`obj spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource`](#obj-specvolumesephemeralvolumeclaimtemplatespecdatasource) + * [`fn withApiGroup(apiGroup)`](#fn-specvolumesephemeralvolumeclaimtemplatespecdatasourcewithapigroup) + * [`fn withKind(kind)`](#fn-specvolumesephemeralvolumeclaimtemplatespecdatasourcewithkind) + * [`fn withName(name)`](#fn-specvolumesephemeralvolumeclaimtemplatespecdatasourcewithname) + * [`obj spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef`](#obj-specvolumesephemeralvolumeclaimtemplatespecdatasourceref) + * [`fn withApiGroup(apiGroup)`](#fn-specvolumesephemeralvolumeclaimtemplatespecdatasourcerefwithapigroup) + * [`fn withKind(kind)`](#fn-specvolumesephemeralvolumeclaimtemplatespecdatasourcerefwithkind) + * [`fn withName(name)`](#fn-specvolumesephemeralvolumeclaimtemplatespecdatasourcerefwithname) + * [`fn withNamespace(namespace)`](#fn-specvolumesephemeralvolumeclaimtemplatespecdatasourcerefwithnamespace) + * [`obj spec.volumes.ephemeral.volumeClaimTemplate.spec.resources`](#obj-specvolumesephemeralvolumeclaimtemplatespecresources) + * [`fn withLimits(limits)`](#fn-specvolumesephemeralvolumeclaimtemplatespecresourceswithlimits) + * [`fn withLimitsMixin(limits)`](#fn-specvolumesephemeralvolumeclaimtemplatespecresourceswithlimitsmixin) + * [`fn 
withRequests(requests)`](#fn-specvolumesephemeralvolumeclaimtemplatespecresourceswithrequests) + * [`fn withRequestsMixin(requests)`](#fn-specvolumesephemeralvolumeclaimtemplatespecresourceswithrequestsmixin) + * [`obj spec.volumes.ephemeral.volumeClaimTemplate.spec.selector`](#obj-specvolumesephemeralvolumeclaimtemplatespecselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specvolumesephemeralvolumeclaimtemplatespecselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specvolumesephemeralvolumeclaimtemplatespecselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specvolumesephemeralvolumeclaimtemplatespecselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specvolumesephemeralvolumeclaimtemplatespecselectorwithmatchlabelsmixin) + * [`obj spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions`](#obj-specvolumesephemeralvolumeclaimtemplatespecselectormatchexpressions) + * [`fn withKey(key)`](#fn-specvolumesephemeralvolumeclaimtemplatespecselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specvolumesephemeralvolumeclaimtemplatespecselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specvolumesephemeralvolumeclaimtemplatespecselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specvolumesephemeralvolumeclaimtemplatespecselectormatchexpressionswithvaluesmixin) + * [`obj spec.volumes.fc`](#obj-specvolumesfc) + * [`fn withFsType(fsType)`](#fn-specvolumesfcwithfstype) + * [`fn withLun(lun)`](#fn-specvolumesfcwithlun) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesfcwithreadonly) + * [`fn withTargetWWNs(targetWWNs)`](#fn-specvolumesfcwithtargetwwns) + * [`fn withTargetWWNsMixin(targetWWNs)`](#fn-specvolumesfcwithtargetwwnsmixin) + * [`fn withWwids(wwids)`](#fn-specvolumesfcwithwwids) + * [`fn withWwidsMixin(wwids)`](#fn-specvolumesfcwithwwidsmixin) + * [`obj spec.volumes.flexVolume`](#obj-specvolumesflexvolume) + * [`fn withDriver(driver)`](#fn-specvolumesflexvolumewithdriver) + * [`fn withFsType(fsType)`](#fn-specvolumesflexvolumewithfstype) + * [`fn withOptions(options)`](#fn-specvolumesflexvolumewithoptions) + * [`fn withOptionsMixin(options)`](#fn-specvolumesflexvolumewithoptionsmixin) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesflexvolumewithreadonly) + * [`obj spec.volumes.flexVolume.secretRef`](#obj-specvolumesflexvolumesecretref) + * [`fn withName(name)`](#fn-specvolumesflexvolumesecretrefwithname) + * [`obj spec.volumes.flocker`](#obj-specvolumesflocker) + * [`fn withDatasetName(datasetName)`](#fn-specvolumesflockerwithdatasetname) + * [`fn withDatasetUUID(datasetUUID)`](#fn-specvolumesflockerwithdatasetuuid) + * [`obj spec.volumes.gcePersistentDisk`](#obj-specvolumesgcepersistentdisk) + * [`fn withFsType(fsType)`](#fn-specvolumesgcepersistentdiskwithfstype) + * [`fn withPartition(partition)`](#fn-specvolumesgcepersistentdiskwithpartition) + * [`fn withPdName(pdName)`](#fn-specvolumesgcepersistentdiskwithpdname) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesgcepersistentdiskwithreadonly) + * [`obj spec.volumes.gitRepo`](#obj-specvolumesgitrepo) + * [`fn withDirectory(directory)`](#fn-specvolumesgitrepowithdirectory) + * [`fn withRepository(repository)`](#fn-specvolumesgitrepowithrepository) + * [`fn withRevision(revision)`](#fn-specvolumesgitrepowithrevision) + * [`obj spec.volumes.glusterfs`](#obj-specvolumesglusterfs) + * [`fn withEndpoints(endpoints)`](#fn-specvolumesglusterfswithendpoints) + * [`fn 
withPath(path)`](#fn-specvolumesglusterfswithpath) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesglusterfswithreadonly) + * [`obj spec.volumes.hostPath`](#obj-specvolumeshostpath) + * [`fn withPath(path)`](#fn-specvolumeshostpathwithpath) + * [`fn withType(type)`](#fn-specvolumeshostpathwithtype) + * [`obj spec.volumes.iscsi`](#obj-specvolumesiscsi) + * [`fn withChapAuthDiscovery(chapAuthDiscovery)`](#fn-specvolumesiscsiwithchapauthdiscovery) + * [`fn withChapAuthSession(chapAuthSession)`](#fn-specvolumesiscsiwithchapauthsession) + * [`fn withFsType(fsType)`](#fn-specvolumesiscsiwithfstype) + * [`fn withInitiatorName(initiatorName)`](#fn-specvolumesiscsiwithinitiatorname) + * [`fn withIqn(iqn)`](#fn-specvolumesiscsiwithiqn) + * [`fn withIscsiInterface(iscsiInterface)`](#fn-specvolumesiscsiwithiscsiinterface) + * [`fn withLun(lun)`](#fn-specvolumesiscsiwithlun) + * [`fn withPortals(portals)`](#fn-specvolumesiscsiwithportals) + * [`fn withPortalsMixin(portals)`](#fn-specvolumesiscsiwithportalsmixin) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesiscsiwithreadonly) + * [`fn withTargetPortal(targetPortal)`](#fn-specvolumesiscsiwithtargetportal) + * [`obj spec.volumes.iscsi.secretRef`](#obj-specvolumesiscsisecretref) + * [`fn withName(name)`](#fn-specvolumesiscsisecretrefwithname) + * [`obj spec.volumes.nfs`](#obj-specvolumesnfs) + * [`fn withPath(path)`](#fn-specvolumesnfswithpath) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesnfswithreadonly) + * [`fn withServer(server)`](#fn-specvolumesnfswithserver) + * [`obj spec.volumes.persistentVolumeClaim`](#obj-specvolumespersistentvolumeclaim) + * [`fn withClaimName(claimName)`](#fn-specvolumespersistentvolumeclaimwithclaimname) + * [`fn withReadOnly(readOnly)`](#fn-specvolumespersistentvolumeclaimwithreadonly) + * [`obj spec.volumes.photonPersistentDisk`](#obj-specvolumesphotonpersistentdisk) + * [`fn withFsType(fsType)`](#fn-specvolumesphotonpersistentdiskwithfstype) + * [`fn withPdID(pdID)`](#fn-specvolumesphotonpersistentdiskwithpdid) + * [`obj spec.volumes.portworxVolume`](#obj-specvolumesportworxvolume) + * [`fn withFsType(fsType)`](#fn-specvolumesportworxvolumewithfstype) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesportworxvolumewithreadonly) + * [`fn withVolumeID(volumeID)`](#fn-specvolumesportworxvolumewithvolumeid) + * [`obj spec.volumes.projected`](#obj-specvolumesprojected) + * [`fn withDefaultMode(defaultMode)`](#fn-specvolumesprojectedwithdefaultmode) + * [`fn withSources(sources)`](#fn-specvolumesprojectedwithsources) + * [`fn withSourcesMixin(sources)`](#fn-specvolumesprojectedwithsourcesmixin) + * [`obj spec.volumes.projected.sources`](#obj-specvolumesprojectedsources) + * [`obj spec.volumes.projected.sources.clusterTrustBundle`](#obj-specvolumesprojectedsourcesclustertrustbundle) + * [`fn withName(name)`](#fn-specvolumesprojectedsourcesclustertrustbundlewithname) + * [`fn withOptional(optional)`](#fn-specvolumesprojectedsourcesclustertrustbundlewithoptional) + * [`fn withPath(path)`](#fn-specvolumesprojectedsourcesclustertrustbundlewithpath) + * [`fn withSignerName(signerName)`](#fn-specvolumesprojectedsourcesclustertrustbundlewithsignername) + * [`obj spec.volumes.projected.sources.clusterTrustBundle.labelSelector`](#obj-specvolumesprojectedsourcesclustertrustbundlelabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specvolumesprojectedsourcesclustertrustbundlelabelselectorwithmatchexpressions) + * [`fn 
withMatchExpressionsMixin(matchExpressions)`](#fn-specvolumesprojectedsourcesclustertrustbundlelabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specvolumesprojectedsourcesclustertrustbundlelabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specvolumesprojectedsourcesclustertrustbundlelabelselectorwithmatchlabelsmixin) + * [`obj spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions`](#obj-specvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.volumes.projected.sources.configMap`](#obj-specvolumesprojectedsourcesconfigmap) + * [`fn withItems(items)`](#fn-specvolumesprojectedsourcesconfigmapwithitems) + * [`fn withItemsMixin(items)`](#fn-specvolumesprojectedsourcesconfigmapwithitemsmixin) + * [`fn withName(name)`](#fn-specvolumesprojectedsourcesconfigmapwithname) + * [`fn withOptional(optional)`](#fn-specvolumesprojectedsourcesconfigmapwithoptional) + * [`obj spec.volumes.projected.sources.configMap.items`](#obj-specvolumesprojectedsourcesconfigmapitems) + * [`fn withKey(key)`](#fn-specvolumesprojectedsourcesconfigmapitemswithkey) + * [`fn withMode(mode)`](#fn-specvolumesprojectedsourcesconfigmapitemswithmode) + * [`fn withPath(path)`](#fn-specvolumesprojectedsourcesconfigmapitemswithpath) + * [`obj spec.volumes.projected.sources.downwardAPI`](#obj-specvolumesprojectedsourcesdownwardapi) + * [`fn withItems(items)`](#fn-specvolumesprojectedsourcesdownwardapiwithitems) + * [`fn withItemsMixin(items)`](#fn-specvolumesprojectedsourcesdownwardapiwithitemsmixin) + * [`obj spec.volumes.projected.sources.downwardAPI.items`](#obj-specvolumesprojectedsourcesdownwardapiitems) + * [`fn withMode(mode)`](#fn-specvolumesprojectedsourcesdownwardapiitemswithmode) + * [`fn withPath(path)`](#fn-specvolumesprojectedsourcesdownwardapiitemswithpath) + * [`obj spec.volumes.projected.sources.downwardAPI.items.fieldRef`](#obj-specvolumesprojectedsourcesdownwardapiitemsfieldref) + * [`fn withApiVersion(apiVersion)`](#fn-specvolumesprojectedsourcesdownwardapiitemsfieldrefwithapiversion) + * [`fn withFieldPath(fieldPath)`](#fn-specvolumesprojectedsourcesdownwardapiitemsfieldrefwithfieldpath) + * [`obj spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef`](#obj-specvolumesprojectedsourcesdownwardapiitemsresourcefieldref) + * [`fn withContainerName(containerName)`](#fn-specvolumesprojectedsourcesdownwardapiitemsresourcefieldrefwithcontainername) + * [`fn withDivisor(divisor)`](#fn-specvolumesprojectedsourcesdownwardapiitemsresourcefieldrefwithdivisor) + * [`fn withResource(resource)`](#fn-specvolumesprojectedsourcesdownwardapiitemsresourcefieldrefwithresource) + * [`obj spec.volumes.projected.sources.secret`](#obj-specvolumesprojectedsourcessecret) + * [`fn withItems(items)`](#fn-specvolumesprojectedsourcessecretwithitems) + * [`fn withItemsMixin(items)`](#fn-specvolumesprojectedsourcessecretwithitemsmixin) + * [`fn withName(name)`](#fn-specvolumesprojectedsourcessecretwithname) + * [`fn 
withOptional(optional)`](#fn-specvolumesprojectedsourcessecretwithoptional) + * [`obj spec.volumes.projected.sources.secret.items`](#obj-specvolumesprojectedsourcessecretitems) + * [`fn withKey(key)`](#fn-specvolumesprojectedsourcessecretitemswithkey) + * [`fn withMode(mode)`](#fn-specvolumesprojectedsourcessecretitemswithmode) + * [`fn withPath(path)`](#fn-specvolumesprojectedsourcessecretitemswithpath) + * [`obj spec.volumes.projected.sources.serviceAccountToken`](#obj-specvolumesprojectedsourcesserviceaccounttoken) + * [`fn withAudience(audience)`](#fn-specvolumesprojectedsourcesserviceaccounttokenwithaudience) + * [`fn withExpirationSeconds(expirationSeconds)`](#fn-specvolumesprojectedsourcesserviceaccounttokenwithexpirationseconds) + * [`fn withPath(path)`](#fn-specvolumesprojectedsourcesserviceaccounttokenwithpath) + * [`obj spec.volumes.quobyte`](#obj-specvolumesquobyte) + * [`fn withGroup(group)`](#fn-specvolumesquobytewithgroup) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesquobytewithreadonly) + * [`fn withRegistry(registry)`](#fn-specvolumesquobytewithregistry) + * [`fn withTenant(tenant)`](#fn-specvolumesquobytewithtenant) + * [`fn withUser(user)`](#fn-specvolumesquobytewithuser) + * [`fn withVolume(volume)`](#fn-specvolumesquobytewithvolume) + * [`obj spec.volumes.rbd`](#obj-specvolumesrbd) + * [`fn withFsType(fsType)`](#fn-specvolumesrbdwithfstype) + * [`fn withImage(image)`](#fn-specvolumesrbdwithimage) + * [`fn withKeyring(keyring)`](#fn-specvolumesrbdwithkeyring) + * [`fn withMonitors(monitors)`](#fn-specvolumesrbdwithmonitors) + * [`fn withMonitorsMixin(monitors)`](#fn-specvolumesrbdwithmonitorsmixin) + * [`fn withPool(pool)`](#fn-specvolumesrbdwithpool) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesrbdwithreadonly) + * [`fn withUser(user)`](#fn-specvolumesrbdwithuser) + * [`obj spec.volumes.rbd.secretRef`](#obj-specvolumesrbdsecretref) + * [`fn withName(name)`](#fn-specvolumesrbdsecretrefwithname) + * [`obj spec.volumes.scaleIO`](#obj-specvolumesscaleio) + * [`fn withFsType(fsType)`](#fn-specvolumesscaleiowithfstype) + * [`fn withGateway(gateway)`](#fn-specvolumesscaleiowithgateway) + * [`fn withProtectionDomain(protectionDomain)`](#fn-specvolumesscaleiowithprotectiondomain) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesscaleiowithreadonly) + * [`fn withSslEnabled(sslEnabled)`](#fn-specvolumesscaleiowithsslenabled) + * [`fn withStorageMode(storageMode)`](#fn-specvolumesscaleiowithstoragemode) + * [`fn withStoragePool(storagePool)`](#fn-specvolumesscaleiowithstoragepool) + * [`fn withSystem(system)`](#fn-specvolumesscaleiowithsystem) + * [`fn withVolumeName(volumeName)`](#fn-specvolumesscaleiowithvolumename) + * [`obj spec.volumes.scaleIO.secretRef`](#obj-specvolumesscaleiosecretref) + * [`fn withName(name)`](#fn-specvolumesscaleiosecretrefwithname) + * [`obj spec.volumes.secret`](#obj-specvolumessecret) + * [`fn withDefaultMode(defaultMode)`](#fn-specvolumessecretwithdefaultmode) + * [`fn withItems(items)`](#fn-specvolumessecretwithitems) + * [`fn withItemsMixin(items)`](#fn-specvolumessecretwithitemsmixin) + * [`fn withOptional(optional)`](#fn-specvolumessecretwithoptional) + * [`fn withSecretName(secretName)`](#fn-specvolumessecretwithsecretname) + * [`obj spec.volumes.secret.items`](#obj-specvolumessecretitems) + * [`fn withKey(key)`](#fn-specvolumessecretitemswithkey) + * [`fn withMode(mode)`](#fn-specvolumessecretitemswithmode) + * [`fn withPath(path)`](#fn-specvolumessecretitemswithpath) + * [`obj spec.volumes.storageos`](#obj-specvolumesstorageos) + * 
[`fn withFsType(fsType)`](#fn-specvolumesstorageoswithfstype) + * [`fn withReadOnly(readOnly)`](#fn-specvolumesstorageoswithreadonly) + * [`fn withVolumeName(volumeName)`](#fn-specvolumesstorageoswithvolumename) + * [`fn withVolumeNamespace(volumeNamespace)`](#fn-specvolumesstorageoswithvolumenamespace) + * [`obj spec.volumes.storageos.secretRef`](#obj-specvolumesstorageossecretref) + * [`fn withName(name)`](#fn-specvolumesstorageossecretrefwithname) + * [`obj spec.volumes.vsphereVolume`](#obj-specvolumesvspherevolume) + * [`fn withFsType(fsType)`](#fn-specvolumesvspherevolumewithfstype) + * [`fn withStoragePolicyID(storagePolicyID)`](#fn-specvolumesvspherevolumewithstoragepolicyid) + * [`fn withStoragePolicyName(storagePolicyName)`](#fn-specvolumesvspherevolumewithstoragepolicyname) + * [`fn withVolumePath(volumePath)`](#fn-specvolumesvspherevolumewithvolumepath) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of ControllerConfig + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. 
Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"ControllerConfigSpec specifies the configuration for a packaged controller.\nValues provided will override package manager defaults. Labels and\nannotations are passed to both the controller Deployment and ServiceAccount." 
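+
+The constructor, the metadata helpers above and the `spec` setters documented below each return a small object that is combined with `+`. A minimal sketch of a complete ControllerConfig follows; the import path and the `pkg.v1alpha1.controllerConfig` location are assumptions about your vendored library layout, so adjust them to your setup:
+
+```jsonnet
+// Hypothetical import path; point it at your generated crossplane library.
+local crossplane = import 'crossplane/main.libsonnet';
+local cc = crossplane.pkg.v1alpha1.controllerConfig;
+
+cc.new('provider-aws-config') +
+cc.metadata.withLabels({ 'app.kubernetes.io/part-of': 'crossplane' }) +
+cc.metadata.withAnnotations({ 'example.org/owner': 'platform-team' }) +
+// With more than one replica, enable leader election (see spec.withReplicas below).
+cc.spec.withReplicas(1) +
+cc.spec.withServiceAccountName('provider-aws')
+```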
+ +### fn spec.withArgs + +```ts +withArgs(args) +``` + +"Arguments to the entrypoint.\nThe docker image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +### fn spec.withArgsMixin + +```ts +withArgsMixin(args) +``` + +"Arguments to the entrypoint.\nThe docker image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +**Note:** This function appends passed data to existing values + +### fn spec.withEnv + +```ts +withEnv(env) +``` + +"List of environment variables to set in the container.\nCannot be updated." + +### fn spec.withEnvFrom + +```ts +withEnvFrom(envFrom) +``` + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +### fn spec.withEnvFromMixin + +```ts +withEnvFromMixin(envFrom) +``` + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.withEnvMixin + +```ts +withEnvMixin(env) +``` + +"List of environment variables to set in the container.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.withImage + +```ts +withImage(image) +``` + +"Docker image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets." 
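+
+For example, overriding the controller image, entrypoint arguments and environment could look like the sketch below (`cc` is the hypothetical binding introduced under `obj spec`; the image tag and values are illustrative):
+
+```jsonnet
+local cc = (import 'crossplane/main.libsonnet').pkg.v1alpha1.controllerConfig;
+
+cc.spec.withImage('registry.example.org/provider-aws-controller:v0.33.0') +
+cc.spec.withArgs(['--debug']) +
+cc.spec.withEnv([
+  { name: 'HTTP_PROXY', value: 'http://proxy.example.org:3128' },
+]) +
+// The *Mixin variant appends to, rather than replaces, the env list.
+cc.spec.withEnvMixin([
+  { name: 'NO_PROXY', value: '10.0.0.0/8' },
+])
+```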
+ +### fn spec.withImagePullPolicy + +```ts +withImagePullPolicy(imagePullPolicy) +``` + +"Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + +### fn spec.withImagePullSecrets + +```ts +withImagePullSecrets(imagePullSecrets) +``` + +"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use. For example,\nin the case of docker, only DockerConfig type secrets are honored.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\nSetting ImagePullSecrets will replace any secrets that have been\npropagated to a controller Deployment, typically via packagePullSecrets." + +### fn spec.withImagePullSecretsMixin + +```ts +withImagePullSecretsMixin(imagePullSecrets) +``` + +"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use. For example,\nin the case of docker, only DockerConfig type secrets are honored.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\nSetting ImagePullSecrets will replace any secrets that have been\npropagated to a controller Deployment, typically via packagePullSecrets." + +**Note:** This function appends passed data to existing values + +### fn spec.withNodeName + +```ts +withNodeName(nodeName) +``` + +"NodeName is a request to schedule this pod onto a specific node. If it is non-empty,\nthe scheduler simply schedules this pod onto that node, assuming that it fits resource\nrequirements." + +### fn spec.withNodeSelector + +```ts +withNodeSelector(nodeSelector) +``` + +"NodeSelector is a selector which must be true for the pod to fit on a node.\nSelector which must match a node's labels for the pod to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + +### fn spec.withNodeSelectorMixin + +```ts +withNodeSelectorMixin(nodeSelector) +``` + +"NodeSelector is a selector which must be true for the pod to fit on a node.\nSelector which must match a node's labels for the pod to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + +**Note:** This function appends passed data to existing values + +### fn spec.withPorts + +```ts +withPorts(ports) +``` + +"List of container ports to expose on the container" + +### fn spec.withPortsMixin + +```ts +withPortsMixin(ports) +``` + +"List of container ports to expose on the container" + +**Note:** This function appends passed data to existing values + +### fn spec.withPriorityClassName + +```ts +withPriorityClassName(priorityClassName) +``` + +"If specified, indicates the pod's priority. \"system-node-critical\" and\n\"system-cluster-critical\" are two special keywords which indicate the\nhighest priorities with the former being the highest priority. Any other\nname must be defined by creating a PriorityClass object with that name.\nIf not specified, the pod priority will be default or zero if there is no\ndefault." 
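+
+The image-pull and scheduling-related setters above compose the same way; a short sketch (values illustrative, `cc` as before):
+
+```jsonnet
+local cc = (import 'crossplane/main.libsonnet').pkg.v1alpha1.controllerConfig;
+
+cc.spec.withImagePullPolicy('IfNotPresent') +
+cc.spec.withImagePullSecrets([{ name: 'registry-credentials' }]) +
+cc.spec.withNodeSelector({ 'kubernetes.io/arch': 'amd64' }) +
+cc.spec.withPorts([{ name: 'metrics', containerPort: 8080 }]) +
+cc.spec.withPriorityClassName('system-cluster-critical')
+```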
+ +### fn spec.withReplicas + +```ts +withReplicas(replicas) +``` + +"Number of desired pods. This is a pointer to distinguish between explicit\nzero and not specified. Defaults to 1.\nNote: If more than 1 replica is set and leader election is not enabled then\ncontrollers could conflict. Environment variable \"LEADER_ELECTION\" can be\nused to enable leader election process." + +### fn spec.withRuntimeClassName + +```ts +withRuntimeClassName(runtimeClassName) +``` + +"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\nto run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.\nIf unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an\nempty definition that uses the default runtime handler.\nMore info: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md\nThis is a beta feature as of Kubernetes v1.14." + +### fn spec.withServiceAccountName + +```ts +withServiceAccountName(serviceAccountName) +``` + +"ServiceAccountName is the name of the ServiceAccount to use to run this pod.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/\nIf specified, a ServiceAccount named this ServiceAccountName will be used for\nthe spec.serviceAccountName field in Pods to be created and for the subjects.name field\nin a ClusterRoleBinding to be created.\nIf there is no ServiceAccount named this ServiceAccountName, a new ServiceAccount\nwill be created.\nIf there is a pre-existing ServiceAccount named this ServiceAccountName, the ServiceAccount\nwill be used. The annotations in the ControllerConfig will be copied to the ServiceAccount\nand pre-existing annotations will be kept.\nRegardless of whether there is a ServiceAccount created by Crossplane or is in place already,\nthe ServiceAccount will be deleted once the Provider and ControllerConfig are deleted." + +### fn spec.withTolerations + +```ts +withTolerations(tolerations) +``` + +"If specified, the pod's tolerations." + +### fn spec.withTolerationsMixin + +```ts +withTolerationsMixin(tolerations) +``` + +"If specified, the pod's tolerations." + +**Note:** This function appends passed data to existing values + +### fn spec.withVolumeMounts + +```ts +withVolumeMounts(volumeMounts) +``` + +"List of VolumeMounts to mount into the container's filesystem.\nCannot be updated." + +### fn spec.withVolumeMountsMixin + +```ts +withVolumeMountsMixin(volumeMounts) +``` + +"List of VolumeMounts to mount into the container's filesystem.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.withVolumes + +```ts +withVolumes(volumes) +``` + +"List of volumes that can be mounted by containers belonging to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes" + +### fn spec.withVolumesMixin + +```ts +withVolumesMixin(volumes) +``` + +"List of volumes that can be mounted by containers belonging to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes" + +**Note:** This function appends passed data to existing values + +## obj spec.affinity + +"If specified, the pod's scheduling constraints" + +## obj spec.affinity.nodeAffinity + +"Describes node affinity scheduling rules for the pod." 
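+
+The node-affinity builders documented in the following sections nest: the list setter takes terms that are themselves assembled from this object's sub-builders. A sketch of a soft zone preference, reusing the hypothetical `cc` binding from `obj spec` (zone names illustrative):
+
+```jsonnet
+local cc = (import 'crossplane/main.libsonnet').pkg.v1alpha1.controllerConfig;
+local preferred = cc.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution;
+
+cc.spec.affinity.nodeAffinity.withPreferredDuringSchedulingIgnoredDuringExecution([
+  preferred.withWeight(50) +
+  preferred.preference.withMatchExpressions([
+    preferred.preference.matchExpressions.withKey('topology.kubernetes.io/zone') +
+    preferred.preference.matchExpressions.withOperator('In') +
+    preferred.preference.matchExpressions.withValues(['eu-west-1a', 'eu-west-1b']),
+  ]),
+])
+```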
+ +### fn spec.affinity.nodeAffinity.withPreferredDuringSchedulingIgnoredDuringExecution + +```ts +withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.affinity.nodeAffinity.withPreferredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.withWeight + +```ts +withWeight(weight) +``` + +"Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100." + +## obj spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference + +"A node selector term, associated with the corresponding weight." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"A list of node selector requirements by node's labels." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"A list of node selector requirements by node's labels." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.withMatchFields + +```ts +withMatchFields(matchFields) +``` + +"A list of node selector requirements by node's fields." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.withMatchFieldsMixin + +```ts +withMatchFieldsMixin(matchFields) +``` + +"A list of node selector requirements by node's fields." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions + +"A list of node selector requirements by node's labels." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.withKey + +```ts +withKey(key) +``` + +"The label key that the selector applies to." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.withValues + +```ts +withValues(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields + +"A list of node selector requirements by node's fields." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.withKey + +```ts +withKey(key) +``` + +"The label key that the selector applies to." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.withOperator + +```ts +withOperator(operator) +``` + +"Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.withValues + +```ts +withValues(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." 
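+
+`matchFields` terms are assembled exactly like the `matchExpressions` terms above; for instance, selecting a node by its `metadata.name` field (a fragment-only sketch, with `cc` bound as in the earlier examples and the node name illustrative):
+
+```jsonnet
+local cc = (import 'crossplane/main.libsonnet').pkg.v1alpha1.controllerConfig;
+local preference = cc.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference;
+
+preference.withMatchFields([
+  preference.matchFields.withKey('metadata.name') +
+  preference.matchFields.withOperator('In') +
+  preference.matchFields.withValues(['worker-1']),
+])
+```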
+ +### fn spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + +"If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to an update), the system\nmay or may not try to eventually evict the pod from its node." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNodeSelectorTerms + +```ts +withNodeSelectorTerms(nodeSelectorTerms) +``` + +"Required. A list of node selector terms. The terms are ORed." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNodeSelectorTermsMixin + +```ts +withNodeSelectorTermsMixin(nodeSelectorTerms) +``` + +"Required. A list of node selector terms. The terms are ORed." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms + +"Required. A list of node selector terms. The terms are ORed." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"A list of node selector requirements by node's labels." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"A list of node selector requirements by node's labels." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.withMatchFields + +```ts +withMatchFields(matchFields) +``` + +"A list of node selector requirements by node's fields." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.withMatchFieldsMixin + +```ts +withMatchFieldsMixin(matchFields) +``` + +"A list of node selector requirements by node's fields." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions + +"A list of node selector requirements by node's labels." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.withKey + +```ts +withKey(key) +``` + +"The label key that the selector applies to." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." 
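+
+A hard requirement is expressed through `requiredDuringSchedulingIgnoredDuringExecution.withNodeSelectorTerms`, with terms built from the sub-builders above. Like the other setters, this yields a fragment to add onto the object from `new()` (sketch, `cc` as before):
+
+```jsonnet
+local cc = (import 'crossplane/main.libsonnet').pkg.v1alpha1.controllerConfig;
+local required = cc.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution;
+
+required.withNodeSelectorTerms([
+  required.nodeSelectorTerms.withMatchExpressions([
+    required.nodeSelectorTerms.matchExpressions.withKey('node-role.kubernetes.io/control-plane') +
+    required.nodeSelectorTerms.matchExpressions.withOperator('DoesNotExist'),
+  ]),
+])
+```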
+ +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.withValues + +```ts +withValues(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields + +"A list of node selector requirements by node's fields." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.withKey + +```ts +withKey(key) +``` + +"The label key that the selector applies to." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.withOperator + +```ts +withOperator(operator) +``` + +"Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.withValues + +```ts +withValues(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +### fn spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAffinity + +"Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s))." + +### fn spec.affinity.podAffinity.withPreferredDuringSchedulingIgnoredDuringExecution + +```ts +withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. 
The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.affinity.podAffinity.withPreferredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.withRequiredDuringSchedulingIgnoredDuringExecution + +```ts +withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution) +``` + +"If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +### fn spec.affinity.podAffinity.withRequiredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution) +``` + +"If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." 
+ +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.withWeight + +```ts +withWeight(weight) +``` + +"weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." + +## obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm + +"Required. A pod affinity term, associated with the corresponding weight." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMatchLabelKeys + +```ts +withMatchLabelKeys(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMatchLabelKeysMixin + +```ts +withMatchLabelKeysMixin(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMismatchLabelKeys + +```ts +withMismatchLabelKeys(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMismatchLabelKeysMixin + +```ts +withMismatchLabelKeysMixin(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. 
The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withNamespaces + +```ts +withNamespaces(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withNamespacesMixin + +```ts +withNamespacesMixin(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withTopologyKey + +```ts +withTopologyKey(topologyKey) +``` + +"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + +## obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector + +"A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." 
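+
+Putting the weighted term, topology key and label selector together, a soft co-location rule might look like this sketch (labels illustrative, `cc` as before):
+
+```jsonnet
+local cc = (import 'crossplane/main.libsonnet').pkg.v1alpha1.controllerConfig;
+local preferred = cc.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution;
+
+cc.spec.affinity.podAffinity.withPreferredDuringSchedulingIgnoredDuringExecution([
+  preferred.withWeight(100) +
+  preferred.podAffinityTerm.withTopologyKey('kubernetes.io/hostname') +
+  preferred.podAffinityTerm.labelSelector.withMatchLabels({ 'app.kubernetes.io/name': 'cache' }),
+])
+```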
+ +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector + +"A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution + +"If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMatchLabelKeys + +```ts +withMatchLabelKeys(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMatchLabelKeysMixin + +```ts +withMatchLabelKeysMixin(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMismatchLabelKeys + +```ts +withMismatchLabelKeys(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMismatchLabelKeysMixin + +```ts +withMismatchLabelKeysMixin(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNamespaces + +```ts +withNamespaces(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." 
+ +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNamespacesMixin + +```ts +withNamespacesMixin(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withTopologyKey + +```ts +withTopologyKey(topologyKey) +``` + +"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + +## obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector + +"A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." 
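+
+The required (hard) variant takes the pod affinity terms directly; for example, co-scheduling with pods that carry a given label within the same zone (sketch, labels illustrative, `cc` as before):
+
+```jsonnet
+local cc = (import 'crossplane/main.libsonnet').pkg.v1alpha1.controllerConfig;
+local required = cc.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution;
+
+cc.spec.affinity.podAffinity.withRequiredDuringSchedulingIgnoredDuringExecution([
+  required.withTopologyKey('topology.kubernetes.io/zone') +
+  required.labelSelector.withMatchLabels({ 'app.kubernetes.io/part-of': 'crossplane' }),
+])
+```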
+ +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector + +"A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." 
+ +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAntiAffinity + +"Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s))." + +### fn spec.affinity.podAntiAffinity.withPreferredDuringSchedulingIgnoredDuringExecution + +```ts +withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.affinity.podAntiAffinity.withPreferredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.withRequiredDuringSchedulingIgnoredDuringExecution + +```ts +withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution) +``` + +"If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +### fn spec.affinity.podAntiAffinity.withRequiredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution) +``` + +"If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.withWeight + +```ts +withWeight(weight) +``` + +"weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." + +## obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm + +"Required. A pod affinity term, associated with the corresponding weight." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMatchLabelKeys + +```ts +withMatchLabelKeys(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. 
The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMatchLabelKeysMixin + +```ts +withMatchLabelKeysMixin(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMismatchLabelKeys + +```ts +withMismatchLabelKeys(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMismatchLabelKeysMixin + +```ts +withMismatchLabelKeysMixin(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withNamespaces + +```ts +withNamespaces(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." 
+ +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withNamespacesMixin + +```ts +withNamespacesMixin(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withTopologyKey + +```ts +withTopologyKey(topologyKey) +``` + +"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + +## obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector + +"A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." 
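+
+**Example (sketch):** assuming this library is imported as `lib` (a placeholder for the real import path) and that these builders return object fragments that compose with `+`, a soft spreading rule can be assembled from the functions above and passed to `withPreferredDuringSchedulingIgnoredDuringExecution`; the label values are illustrative:
+
+```jsonnet
+// Placeholder import; substitute the actual path to this library.
+local lib = import 'main.libsonnet';
+local anti = lib.spec.affinity.podAntiAffinity;
+local pref = anti.preferredDuringSchedulingIgnoredDuringExecution;
+
+// Prefer (weight 100) not co-locating app=provider replicas on one node.
+anti.withPreferredDuringSchedulingIgnoredDuringExecution([
+  pref.withWeight(100)
+  + pref.podAffinityTerm.withTopologyKey('kubernetes.io/hostname')
+  + pref.podAffinityTerm.labelSelector.withMatchLabels({ app: 'provider' }),
+])
+```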
+ +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector + +"A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." 
+ +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution + +"If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMatchLabelKeys + +```ts +withMatchLabelKeys(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMatchLabelKeysMixin + +```ts +withMatchLabelKeysMixin(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. 
The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMismatchLabelKeys + +```ts +withMismatchLabelKeys(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMismatchLabelKeysMixin + +```ts +withMismatchLabelKeysMixin(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNamespaces + +```ts +withNamespaces(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNamespacesMixin + +```ts +withNamespacesMixin(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withTopologyKey + +```ts +withTopologyKey(topologyKey) +``` + +"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + +## obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector + +"A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
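+
+**Example (sketch):** with the same placeholder import as the earlier sketch, a hard anti-affinity rule built from the functions above; unlike the preferred form, an unsatisfiable term here keeps the pod unscheduled:
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder path
+local anti = lib.spec.affinity.podAntiAffinity;
+local req = anti.requiredDuringSchedulingIgnoredDuringExecution;
+
+// Never run two app=provider pods in the same zone.
+anti.withRequiredDuringSchedulingIgnoredDuringExecution([
+  req.withTopologyKey('topology.kubernetes.io/zone')
+  + req.labelSelector.withMatchLabels({ app: 'provider' }),
+])
+```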
+ +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector + +"A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. 
This array is replaced during a strategic\nmerge patch." + +### fn spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.env + +"List of environment variables to set in the container.\nCannot be updated." + +### fn spec.env.withName + +```ts +withName(name) +``` + +"Name of the environment variable. Must be a C_IDENTIFIER." + +### fn spec.env.withValue + +```ts +withValue(value) +``` + +"Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." + +## obj spec.env.valueFrom + +"Source for the environment variable's value. Cannot be used if value is not empty." + +## obj spec.env.valueFrom.configMapKeyRef + +"Selects a key of a ConfigMap." + +### fn spec.env.valueFrom.configMapKeyRef.withKey + +```ts +withKey(key) +``` + +"The key to select." + +### fn spec.env.valueFrom.configMapKeyRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.env.valueFrom.configMapKeyRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the ConfigMap or its key must be defined" + +## obj spec.env.valueFrom.fieldRef + +"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + +### fn spec.env.valueFrom.fieldRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." + +### fn spec.env.valueFrom.fieldRef.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"Path of the field to select in the specified API version." + +## obj spec.env.valueFrom.resourceFieldRef + +"Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." 
+
+### fn spec.env.valueFrom.resourceFieldRef.withContainerName
+
+```ts
+withContainerName(containerName)
+```
+
+"Container name: required for volumes, optional for env vars"
+
+### fn spec.env.valueFrom.resourceFieldRef.withDivisor
+
+```ts
+withDivisor(divisor)
+```
+
+"Specifies the output format of the exposed resources, defaults to \"1\""
+
+### fn spec.env.valueFrom.resourceFieldRef.withResource
+
+```ts
+withResource(resource)
+```
+
+"Required: resource to select"
+
+## obj spec.env.valueFrom.secretKeyRef
+
+"Selects a key of a secret in the pod's namespace"
+
+### fn spec.env.valueFrom.secretKeyRef.withKey
+
+```ts
+withKey(key)
+```
+
+"The key of the secret to select from. Must be a valid secret key."
+
+### fn spec.env.valueFrom.secretKeyRef.withName
+
+```ts
+withName(name)
+```
+
+"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?"
+
+### fn spec.env.valueFrom.secretKeyRef.withOptional
+
+```ts
+withOptional(optional)
+```
+
+"Specify whether the Secret or its key must be defined"
+
+## obj spec.envFrom
+
+"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated."
+
+### fn spec.envFrom.withPrefix
+
+```ts
+withPrefix(prefix)
+```
+
+"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER."
+
+## obj spec.envFrom.configMapRef
+
+"The ConfigMap to select from"
+
+### fn spec.envFrom.configMapRef.withName
+
+```ts
+withName(name)
+```
+
+"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?"
+
+### fn spec.envFrom.configMapRef.withOptional
+
+```ts
+withOptional(optional)
+```
+
+"Specify whether the ConfigMap must be defined"
+
+## obj spec.envFrom.secretRef
+
+"The Secret to select from"
+
+### fn spec.envFrom.secretRef.withName
+
+```ts
+withName(name)
+```
+
+"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?"
+
+### fn spec.envFrom.secretRef.withOptional
+
+```ts
+withOptional(optional)
+```
+
+"Specify whether the Secret must be defined"
+
+## obj spec.imagePullSecrets
+
+"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use. For example,\nin the case of docker, only DockerConfig type secrets are honored.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\nSetting ImagePullSecrets will replace any secrets that have been\npropagated to a controller Deployment, typically via packagePullSecrets."
+
+### fn spec.imagePullSecrets.withName
+
+```ts
+withName(name)
+```
+
+"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?"
+
+## obj spec.metadata
+
+"Metadata that will be added to the provider Pod."
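+
+**Example (sketch):** extra labels and annotations for the provider Pod, using the `spec.metadata` functions documented below; the keys are hypothetical and the import is the same placeholder used in the earlier sketches:
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder path
+local md = lib.spec.metadata;
+
+// Illustrative label and annotation keys only.
+md.withLabels({ 'example.com/team': 'platform' })
++ md.withAnnotations({ 'example.com/owner': 'platform-team' })
+```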
+
+### fn spec.metadata.withAnnotations
+
+```ts
+withAnnotations(annotations)
+```
+
+"Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"
+
+### fn spec.metadata.withAnnotationsMixin
+
+```ts
+withAnnotationsMixin(annotations)
+```
+
+"Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/"
+
+**Note:** This function appends passed data to existing values
+
+### fn spec.metadata.withLabels
+
+```ts
+withLabels(labels)
+```
+
+"Map of string keys and values that can be used to organize and\ncategorize (scope and select) objects. This will only affect\nlabels on the pod, not the pod selector. Labels will be merged\nwith internal labels used by crossplane, and labels with a\ncrossplane.io key might be overwritten.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"
+
+### fn spec.metadata.withLabelsMixin
+
+```ts
+withLabelsMixin(labels)
+```
+
+"Map of string keys and values that can be used to organize and\ncategorize (scope and select) objects. This will only affect\nlabels on the pod, not the pod selector. Labels will be merged\nwith internal labels used by crossplane, and labels with a\ncrossplane.io key might be overwritten.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"
+
+**Note:** This function appends passed data to existing values
+
+## obj spec.podSecurityContext
+
+"PodSecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field."
+
+### fn spec.podSecurityContext.withFsGroup
+
+```ts
+withFsGroup(fsGroup)
+```
+
+"A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:\n\n\n1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. The permission bits are OR'd with rw-rw----\n\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\nNote that this field cannot be set when spec.os.name is windows."
+
+### fn spec.podSecurityContext.withFsGroupChangePolicy
+
+```ts
+withFsGroupChangePolicy(fsGroupChangePolicy)
+```
+
+"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume\nbefore being exposed inside Pod. This field will only apply to\nvolume types which support fsGroup based ownership(and permissions).\nIt will have no effect on ephemeral volume types such as: secret, configmaps\nand emptydir.\nValid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.\nNote that this field cannot be set when spec.os.name is windows."
+
+### fn spec.podSecurityContext.withRunAsGroup
+
+```ts
+withRunAsGroup(runAsGroup)
+```
+
+"The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.podSecurityContext.withRunAsNonRoot + +```ts +withRunAsNonRoot(runAsNonRoot) +``` + +"Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +### fn spec.podSecurityContext.withRunAsUser + +```ts +withRunAsUser(runAsUser) +``` + +"The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.podSecurityContext.withSupplementalGroups + +```ts +withSupplementalGroups(supplementalGroups) +``` + +"A list of groups applied to the first process run in each container, in addition\nto the container's primary GID, the fsGroup (if specified), and group memberships\ndefined in the container image for the uid of the container process. If unspecified,\nno additional groups are added to any container. Note that group memberships\ndefined in the container image for the uid of the container process are still effective,\neven if they are not included in this list.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.podSecurityContext.withSupplementalGroupsMixin + +```ts +withSupplementalGroupsMixin(supplementalGroups) +``` + +"A list of groups applied to the first process run in each container, in addition\nto the container's primary GID, the fsGroup (if specified), and group memberships\ndefined in the container image for the uid of the container process. If unspecified,\nno additional groups are added to any container. Note that group memberships\ndefined in the container image for the uid of the container process are still effective,\neven if they are not included in this list.\nNote that this field cannot be set when spec.os.name is windows." + +**Note:** This function appends passed data to existing values + +### fn spec.podSecurityContext.withSysctls + +```ts +withSysctls(sysctls) +``` + +"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.podSecurityContext.withSysctlsMixin + +```ts +withSysctlsMixin(sysctls) +``` + +"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows." + +**Note:** This function appends passed data to existing values + +## obj spec.podSecurityContext.appArmorProfile + +"appArmorProfile is the AppArmor options to use by the containers in this pod.\nNote that this field cannot be set when spec.os.name is windows." 
+ +### fn spec.podSecurityContext.appArmorProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile loaded on the node that should be used.\nThe profile must be preconfigured on the node to work.\nMust match the loaded name of the profile.\nMust be set if and only if type is \"Localhost\"." + +### fn spec.podSecurityContext.appArmorProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of AppArmor profile will be applied.\nValid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement." + +## obj spec.podSecurityContext.seLinuxOptions + +"The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.podSecurityContext.seLinuxOptions.withLevel + +```ts +withLevel(level) +``` + +"Level is SELinux level label that applies to the container." + +### fn spec.podSecurityContext.seLinuxOptions.withRole + +```ts +withRole(role) +``` + +"Role is a SELinux role label that applies to the container." + +### fn spec.podSecurityContext.seLinuxOptions.withType + +```ts +withType(type) +``` + +"Type is a SELinux type label that applies to the container." + +### fn spec.podSecurityContext.seLinuxOptions.withUser + +```ts +withUser(user) +``` + +"User is a SELinux user label that applies to the container." + +## obj spec.podSecurityContext.seccompProfile + +"The seccomp options to use by the containers in this pod.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.podSecurityContext.seccompProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." + +### fn spec.podSecurityContext.seccompProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." + +## obj spec.podSecurityContext.sysctls + +"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows." 
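+
+**Example (sketch):** entries built with the `sysctls` functions documented below, passed to the pod-level `withSysctls` setter from above; same placeholder import:
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder path
+local psc = lib.spec.podSecurityContext;
+
+// Allow binding to low ports without extra privileges.
+psc.withSysctls([
+  psc.sysctls.withName('net.ipv4.ip_unprivileged_port_start')
+  + psc.sysctls.withValue('0'),
+])
+```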
+ +### fn spec.podSecurityContext.sysctls.withName + +```ts +withName(name) +``` + +"Name of a property to set" + +### fn spec.podSecurityContext.sysctls.withValue + +```ts +withValue(value) +``` + +"Value of a property to set" + +## obj spec.podSecurityContext.windowsOptions + +"The Windows specific settings applied to all containers.\nIf unspecified, the options within a container's SecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." + +### fn spec.podSecurityContext.windowsOptions.withGmsaCredentialSpec + +```ts +withGmsaCredentialSpec(gmsaCredentialSpec) +``` + +"GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." + +### fn spec.podSecurityContext.windowsOptions.withGmsaCredentialSpecName + +```ts +withGmsaCredentialSpecName(gmsaCredentialSpecName) +``` + +"GMSACredentialSpecName is the name of the GMSA credential spec to use." + +### fn spec.podSecurityContext.windowsOptions.withHostProcess + +```ts +withHostProcess(hostProcess) +``` + +"HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." + +### fn spec.podSecurityContext.windowsOptions.withRunAsUserName + +```ts +withRunAsUserName(runAsUserName) +``` + +"The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +## obj spec.ports + +"List of container ports to expose on the container" + +### fn spec.ports.withContainerPort + +```ts +withContainerPort(containerPort) +``` + +"Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." + +### fn spec.ports.withHostIP + +```ts +withHostIP(hostIP) +``` + +"What host IP to bind the external port to." + +### fn spec.ports.withHostPort + +```ts +withHostPort(hostPort) +``` + +"Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." + +### fn spec.ports.withName + +```ts +withName(name) +``` + +"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." + +### fn spec.ports.withProtocol + +```ts +withProtocol(protocol) +``` + +"Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." + +## obj spec.resources + +"Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/" + +### fn spec.resources.withClaims + +```ts +withClaims(claims) +``` + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. 
It can only be set for containers." + +### fn spec.resources.withClaimsMixin + +```ts +withClaimsMixin(claims) +``` + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withLimits + +```ts +withLimits(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.resources.withLimitsMixin + +```ts +withLimitsMixin(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +### fn spec.resources.withRequests + +```ts +withRequests(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.resources.withRequestsMixin + +```ts +withRequestsMixin(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +## obj spec.resources.claims + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +### fn spec.resources.claims.withName + +```ts +withName(name) +``` + +"Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." + +## obj spec.securityContext + +"SecurityContext holds container-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field." + +### fn spec.securityContext.withAllowPrivilegeEscalation + +```ts +withAllowPrivilegeEscalation(allowPrivilegeEscalation) +``` + +"AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.securityContext.withPrivileged + +```ts +withPrivileged(privileged) +``` + +"Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." 
+ +### fn spec.securityContext.withProcMount + +```ts +withProcMount(procMount) +``` + +"procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.securityContext.withReadOnlyRootFilesystem + +```ts +withReadOnlyRootFilesystem(readOnlyRootFilesystem) +``` + +"Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.securityContext.withRunAsGroup + +```ts +withRunAsGroup(runAsGroup) +``` + +"The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.securityContext.withRunAsNonRoot + +```ts +withRunAsNonRoot(runAsNonRoot) +``` + +"Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +### fn spec.securityContext.withRunAsUser + +```ts +withRunAsUser(runAsUser) +``` + +"The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +## obj spec.securityContext.appArmorProfile + +"appArmorProfile is the AppArmor options to use by this container. If set, this profile\noverrides the pod's appArmorProfile.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.securityContext.appArmorProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile loaded on the node that should be used.\nThe profile must be preconfigured on the node to work.\nMust match the loaded name of the profile.\nMust be set if and only if type is \"Localhost\"." + +### fn spec.securityContext.appArmorProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of AppArmor profile will be applied.\nValid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement." + +## obj spec.securityContext.capabilities + +"The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." 
+ +### fn spec.securityContext.capabilities.withAdd + +```ts +withAdd(add) +``` + +"Added capabilities" + +### fn spec.securityContext.capabilities.withAddMixin + +```ts +withAddMixin(add) +``` + +"Added capabilities" + +**Note:** This function appends passed data to existing values + +### fn spec.securityContext.capabilities.withDrop + +```ts +withDrop(drop) +``` + +"Removed capabilities" + +### fn spec.securityContext.capabilities.withDropMixin + +```ts +withDropMixin(drop) +``` + +"Removed capabilities" + +**Note:** This function appends passed data to existing values + +## obj spec.securityContext.seLinuxOptions + +"The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.securityContext.seLinuxOptions.withLevel + +```ts +withLevel(level) +``` + +"Level is SELinux level label that applies to the container." + +### fn spec.securityContext.seLinuxOptions.withRole + +```ts +withRole(role) +``` + +"Role is a SELinux role label that applies to the container." + +### fn spec.securityContext.seLinuxOptions.withType + +```ts +withType(type) +``` + +"Type is a SELinux type label that applies to the container." + +### fn spec.securityContext.seLinuxOptions.withUser + +```ts +withUser(user) +``` + +"User is a SELinux user label that applies to the container." + +## obj spec.securityContext.seccompProfile + +"The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.securityContext.seccompProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." + +### fn spec.securityContext.seccompProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." + +## obj spec.securityContext.windowsOptions + +"The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." + +### fn spec.securityContext.windowsOptions.withGmsaCredentialSpec + +```ts +withGmsaCredentialSpec(gmsaCredentialSpec) +``` + +"GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." 
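+
+**Example (sketch):** a common hardening baseline that pulls together the container-level `spec.securityContext` functions above; same placeholder import:
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder path
+local sc = lib.spec.securityContext;
+
+// No privilege escalation, read-only root filesystem, no extra capabilities.
+sc.withAllowPrivilegeEscalation(false)
++ sc.withReadOnlyRootFilesystem(true)
++ sc.capabilities.withDrop(['ALL'])
++ sc.seccompProfile.withType('RuntimeDefault')
+```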
+ +### fn spec.securityContext.windowsOptions.withGmsaCredentialSpecName + +```ts +withGmsaCredentialSpecName(gmsaCredentialSpecName) +``` + +"GMSACredentialSpecName is the name of the GMSA credential spec to use." + +### fn spec.securityContext.windowsOptions.withHostProcess + +```ts +withHostProcess(hostProcess) +``` + +"HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." + +### fn spec.securityContext.windowsOptions.withRunAsUserName + +```ts +withRunAsUserName(runAsUserName) +``` + +"The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +## obj spec.tolerations + +"If specified, the pod's tolerations." + +### fn spec.tolerations.withEffect + +```ts +withEffect(effect) +``` + +"Effect indicates the taint effect to match. Empty means match all taint effects.\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." + +### fn spec.tolerations.withKey + +```ts +withKey(key) +``` + +"Key is the taint key that the toleration applies to. Empty means match all taint keys.\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys." + +### fn spec.tolerations.withOperator + +```ts +withOperator(operator) +``` + +"Operator represents a key's relationship to the value.\nValid operators are Exists and Equal. Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod can\ntolerate all taints of a particular category." + +### fn spec.tolerations.withTolerationSeconds + +```ts +withTolerationSeconds(tolerationSeconds) +``` + +"TolerationSeconds represents the period of time the toleration (which must be\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\nit is not set, which means tolerate the taint forever (do not evict). Zero and\nnegative values will be treated as 0 (evict immediately) by the system." + +### fn spec.tolerations.withValue + +```ts +withValue(value) +``` + +"Value is the taint value the toleration matches to.\nIf the operator is Exists, the value should be empty, otherwise just a regular string." + +## obj spec.volumeMounts + +"List of VolumeMounts to mount into the container's filesystem.\nCannot be updated." + +### fn spec.volumeMounts.withMountPath + +```ts +withMountPath(mountPath) +``` + +"Path within the container at which the volume should be mounted. Must\nnot contain ':'." + +### fn spec.volumeMounts.withMountPropagation + +```ts +withMountPropagation(mountPropagation) +``` + +"mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.\nWhen RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified\n(which defaults to None)." + +### fn spec.volumeMounts.withName + +```ts +withName(name) +``` + +"This must match the Name of a Volume." 
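+
+**Example (sketch):** the toleration helpers above build a single list element; the list itself is assumed to be set with a plural `spec.withTolerations(...)` setter documented elsewhere on this page.
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder import
+local obj = lib.example;              // placeholder object
+
+// Tolerate a dedicated-node taint, built from the element helpers above.
+local gpuToleration =
+  obj.spec.tolerations.withKey('nvidia.com/gpu')
+  + obj.spec.tolerations.withOperator('Exists')
+  + obj.spec.tolerations.withEffect('NoSchedule');
+
+obj.spec.withTolerations([gpuToleration])  // assumed plural setter
+```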
+ +### fn spec.volumeMounts.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." + +### fn spec.volumeMounts.withRecursiveReadOnly + +```ts +withRecursiveReadOnly(recursiveReadOnly) +``` + +"RecursiveReadOnly specifies whether read-only mounts should be handled\nrecursively.\n\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made\nrecursively read-only. If this field is set to IfPossible, the mount is made\nrecursively read-only, if it is supported by the container runtime. If this\nfield is set to Enabled, the mount is made recursively read-only if it is\nsupported by the container runtime, otherwise the pod will not be started and\nan error will be generated to indicate the reason.\n\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to\nNone (or be unspecified, which defaults to None).\n\n\nIf this field is not specified, it is treated as an equivalent of Disabled." + +### fn spec.volumeMounts.withSubPath + +```ts +withSubPath(subPath) +``` + +"Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." + +### fn spec.volumeMounts.withSubPathExpr + +```ts +withSubPathExpr(subPathExpr) +``` + +"Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." + +## obj spec.volumes + +"List of volumes that can be mounted by containers belonging to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes" + +### fn spec.volumes.withName + +```ts +withName(name) +``` + +"name of the volume.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + +## obj spec.volumes.awsElasticBlockStore + +"awsElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + +### fn spec.volumes.awsElasticBlockStore.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.volumes.awsElasticBlockStore.withPartition + +```ts +withPartition(partition) +``` + +"partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty)." 
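+
+**Example (sketch):** a volume mount element; `name` must match a volume built with the `spec.volumes` helpers below, and `subPath` mounts only part of that volume. The plural `spec.withVolumeMounts(...)` setter is assumed here.
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder import
+local obj = lib.example;              // placeholder object
+
+local configMount =
+  obj.spec.volumeMounts.withName('config')            // must match a spec.volumes entry
+  + obj.spec.volumeMounts.withMountPath('/etc/app')
+  + obj.spec.volumeMounts.withReadOnly(true)
+  + obj.spec.volumeMounts.withSubPath('app.yaml');
+
+obj.spec.withVolumeMounts([configMount])  // assumed plural setter
+```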
+ +### fn spec.volumes.awsElasticBlockStore.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly value true will force the readOnly setting in VolumeMounts.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + +### fn spec.volumes.awsElasticBlockStore.withVolumeID + +```ts +withVolumeID(volumeID) +``` + +"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + +## obj spec.volumes.azureDisk + +"azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." + +### fn spec.volumes.azureDisk.withCachingMode + +```ts +withCachingMode(cachingMode) +``` + +"cachingMode is the Host Caching mode: None, Read Only, Read Write." + +### fn spec.volumes.azureDisk.withDiskName + +```ts +withDiskName(diskName) +``` + +"diskName is the Name of the data disk in the blob storage" + +### fn spec.volumes.azureDisk.withDiskURI + +```ts +withDiskURI(diskURI) +``` + +"diskURI is the URI of data disk in the blob storage" + +### fn spec.volumes.azureDisk.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + +### fn spec.volumes.azureDisk.withKind + +```ts +withKind(kind) +``` + +"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared" + +### fn spec.volumes.azureDisk.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +## obj spec.volumes.azureFile + +"azureFile represents an Azure File Service mount on the host and bind mount to the pod." + +### fn spec.volumes.azureFile.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.volumes.azureFile.withSecretName + +```ts +withSecretName(secretName) +``` + +"secretName is the name of secret that contains Azure Storage Account Name and Key" + +### fn spec.volumes.azureFile.withShareName + +```ts +withShareName(shareName) +``` + +"shareName is the azure share Name" + +## obj spec.volumes.cephfs + +"cephFS represents a Ceph FS mount on the host that shares a pod's lifetime" + +### fn spec.volumes.cephfs.withMonitors + +```ts +withMonitors(monitors) +``` + +"monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +### fn spec.volumes.cephfs.withMonitorsMixin + +```ts +withMonitorsMixin(monitors) +``` + +"monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.cephfs.withPath + +```ts +withPath(path) +``` + +"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /" + +### fn spec.volumes.cephfs.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +### fn spec.volumes.cephfs.withSecretFile + +```ts +withSecretFile(secretFile) +``` + +"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +### fn spec.volumes.cephfs.withUser + +```ts +withUser(user) +``` + +"user is optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +## obj spec.volumes.cephfs.secretRef + +"secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +### fn spec.volumes.cephfs.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.volumes.cinder + +"cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + +### fn spec.volumes.cinder.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + +### fn spec.volumes.cinder.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + +### fn spec.volumes.cinder.withVolumeID + +```ts +withVolumeID(volumeID) +``` + +"volumeID used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + +## obj spec.volumes.cinder.secretRef + +"secretRef is optional: points to a secret object containing parameters used to connect\nto OpenStack." + +### fn spec.volumes.cinder.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.volumes.configMap + +"configMap represents a configMap that should populate this volume" + +### fn spec.volumes.configMap.withDefaultMode + +```ts +withDefaultMode(defaultMode) +``` + +"defaultMode is optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.volumes.configMap.withItems + +```ts +withItems(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. 
If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.volumes.configMap.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.configMap.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.volumes.configMap.withOptional + +```ts +withOptional(optional) +``` + +"optional specify whether the ConfigMap or its keys must be defined" + +## obj spec.volumes.configMap.items + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.volumes.configMap.items.withKey + +```ts +withKey(key) +``` + +"key is the key to project." + +### fn spec.volumes.configMap.items.withMode + +```ts +withMode(mode) +``` + +"mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.volumes.configMap.items.withPath + +```ts +withPath(path) +``` + +"path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + +## obj spec.volumes.csi + +"csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature)." + +### fn spec.volumes.csi.withDriver + +```ts +withDriver(driver) +``` + +"driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster." + +### fn spec.volumes.csi.withFsType + +```ts +withFsType(fsType) +``` + +"fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply." + +### fn spec.volumes.csi.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly specifies a read-only configuration for the volume.\nDefaults to false (read/write)." 
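+
+**Example (sketch):** projecting selected ConfigMap keys with restrictive file modes, using the ConfigMap volume helpers above; because the manifest is JSON-encoded, modes must be given as decimal integers (0644 octal is 420, 0400 is 256).
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder import
+local obj = lib.example;              // placeholder object
+
+local item =
+  obj.spec.volumes.configMap.items.withKey('app.yaml')
+  + obj.spec.volumes.configMap.items.withPath('config/app.yaml')
+  + obj.spec.volumes.configMap.items.withMode(256);   // 0400 octal
+
+local configVolume =
+  obj.spec.volumes.withName('app-config')
+  + obj.spec.volumes.configMap.withName('my-configmap')
+  + obj.spec.volumes.configMap.withDefaultMode(420)   // 0644 octal
+  + obj.spec.volumes.configMap.withItems([item]);
+
+obj.spec.withVolumes([configVolume])  // assumed plural setter
+```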
+ +### fn spec.volumes.csi.withVolumeAttributes + +```ts +withVolumeAttributes(volumeAttributes) +``` + +"volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values." + +### fn spec.volumes.csi.withVolumeAttributesMixin + +```ts +withVolumeAttributesMixin(volumeAttributes) +``` + +"volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values." + +**Note:** This function appends passed data to existing values + +## obj spec.volumes.csi.nodePublishSecretRef + +"nodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed." + +### fn spec.volumes.csi.nodePublishSecretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.volumes.downwardAPI + +"downwardAPI represents downward API about the pod that should populate this volume" + +### fn spec.volumes.downwardAPI.withDefaultMode + +```ts +withDefaultMode(defaultMode) +``` + +"Optional: mode bits to use on created files by default. Must be a\nOptional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.volumes.downwardAPI.withItems + +```ts +withItems(items) +``` + +"Items is a list of downward API volume file" + +### fn spec.volumes.downwardAPI.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"Items is a list of downward API volume file" + +**Note:** This function appends passed data to existing values + +## obj spec.volumes.downwardAPI.items + +"Items is a list of downward API volume file" + +### fn spec.volumes.downwardAPI.items.withMode + +```ts +withMode(mode) +``` + +"Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.volumes.downwardAPI.items.withPath + +```ts +withPath(path) +``` + +"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'" + +## obj spec.volumes.downwardAPI.items.fieldRef + +"Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported." + +### fn spec.volumes.downwardAPI.items.fieldRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." 
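+
+**Example (sketch):** an inline CSI volume built with the helpers above; `volumeAttributes` is an opaque string map interpreted by the driver, and the driver name below is illustrative only.
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder import
+local obj = lib.example;              // placeholder object
+
+local secretsStore =
+  obj.spec.volumes.withName('secrets-store')
+  + obj.spec.volumes.csi.withDriver('secrets-store.csi.k8s.io')   // illustrative driver
+  + obj.spec.volumes.csi.withReadOnly(true)
+  + obj.spec.volumes.csi.withVolumeAttributes({ secretProviderClass: 'my-provider' });
+
+obj.spec.withVolumes([secretsStore])  // assumed plural setter
+```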
+ +### fn spec.volumes.downwardAPI.items.fieldRef.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"Path of the field to select in the specified API version." + +## obj spec.volumes.downwardAPI.items.resourceFieldRef + +"Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." + +### fn spec.volumes.downwardAPI.items.resourceFieldRef.withContainerName + +```ts +withContainerName(containerName) +``` + +"Container name: required for volumes, optional for env vars" + +### fn spec.volumes.downwardAPI.items.resourceFieldRef.withDivisor + +```ts +withDivisor(divisor) +``` + +"Specifies the output format of the exposed resources, defaults to \"1\ + +### fn spec.volumes.downwardAPI.items.resourceFieldRef.withResource + +```ts +withResource(resource) +``` + +"Required: resource to select" + +## obj spec.volumes.emptyDir + +"emptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + +### fn spec.volumes.emptyDir.withMedium + +```ts +withMedium(medium) +``` + +"medium represents what type of storage medium should back this directory.\nThe default is \"\" which means to use the node's default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + +### fn spec.volumes.emptyDir.withSizeLimit + +```ts +withSizeLimit(sizeLimit) +``` + +"sizeLimit is the total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + +## obj spec.volumes.ephemeral + +"ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same time." + +## obj spec.volumes.ephemeral.volumeClaimTemplate + +"Will be used to create a stand-alone PVC to provision the volume.\nThe pod in which this EphemeralVolumeSource is embedded will be the\nowner of the PVC, i.e. the PVC will be deleted together with the\npod. The name of the PVC will be `-` where\n`` is the name from the `PodSpec.Volumes` array\nentry. 
Pod validation will reject the pod if the concatenated name\nis not valid for a PVC (for example, too long).\n\n\nAn existing PVC with that name that is not owned by the pod\nwill *not* be used for the pod to avoid using an unrelated\nvolume by mistake. Starting the pod is then blocked until\nthe unrelated PVC is removed. If such a pre-created PVC is\nmeant to be used by the pod, the PVC has to updated with an\nowner reference to the pod once the pod exists. Normally\nthis should not be necessary, but it may be useful when\nmanually reconstructing a broken cluster.\n\n\nThis field is read-only and no changes will be made by Kubernetes\nto the PVC after it has been created.\n\n\nRequired, must not be nil." + +## obj spec.volumes.ephemeral.volumeClaimTemplate.metadata + +"May contain labels and annotations that will be copied into the PVC\nwhen creating it. No other fields are allowed and will be rejected during\nvalidation." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + + + +### fn spec.volumes.ephemeral.volumeClaimTemplate.metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + + + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.ephemeral.volumeClaimTemplate.metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + + + +### fn spec.volumes.ephemeral.volumeClaimTemplate.metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + + + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.ephemeral.volumeClaimTemplate.metadata.withLabels + +```ts +withLabels(labels) +``` + + + +### fn spec.volumes.ephemeral.volumeClaimTemplate.metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + + + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.ephemeral.volumeClaimTemplate.metadata.withName + +```ts +withName(name) +``` + + + +### fn spec.volumes.ephemeral.volumeClaimTemplate.metadata.withNamespace + +```ts +withNamespace(namespace) +``` + + + +## obj spec.volumes.ephemeral.volumeClaimTemplate.spec + +"The specification for the PersistentVolumeClaim. The entire content is\ncopied unchanged into the PVC that gets created from this\ntemplate. The same fields as in a PersistentVolumeClaim\nare also valid here." 
+ +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.withAccessModes + +```ts +withAccessModes(accessModes) +``` + +"accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1" + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.withAccessModesMixin + +```ts +withAccessModesMixin(accessModes) +``` + +"accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1" + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.withStorageClassName + +```ts +withStorageClassName(storageClassName) +``` + +"storageClassName is the name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1" + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.withVolumeAttributesClassName + +```ts +withVolumeAttributesClassName(volumeAttributesClassName) +``` + +"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.\nIf specified, the CSI driver will create or update the volume with the attributes defined\nin the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,\nit can be changed after the claim is created. An empty string value means that no VolumeAttributesClass\nwill be applied to the claim but it's not allowed to reset this field to empty string once it is set.\nIf unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass\nwill be set by the persistentvolume controller if it exists.\nIf the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be\nset to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource\nexists.\nMore info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/\n(Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.withVolumeMode + +```ts +withVolumeMode(volumeMode) +``` + +"volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.withVolumeName + +```ts +withVolumeName(volumeName) +``` + +"volumeName is the binding reference to the PersistentVolume backing this claim." + +## obj spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource + +"dataSource field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\nWhen the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,\nand dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.\nIf the namespace is specified, then dataSourceRef will not be copied to dataSource." 
+ +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource.withApiGroup + +```ts +withApiGroup(apiGroup) +``` + +"APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource.withKind + +```ts +withKind(kind) +``` + +"Kind is the type of resource being referenced" + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource.withName + +```ts +withName(name) +``` + +"Name is the name of resource being referenced" + +## obj spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef + +"dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\nvolume is desired. This may be any object from a non-empty API group (non\ncore object) or a PersistentVolumeClaim object.\nWhen this field is specified, volume binding will only succeed if the type of\nthe specified object matches some installed volume populator or dynamic\nprovisioner.\nThis field will replace the functionality of the dataSource field and as such\nif both fields are non-empty, they must have the same value. For backwards\ncompatibility, when namespace isn't specified in dataSourceRef,\nboth fields (dataSource and dataSourceRef) will be set to the same\nvalue automatically if one of them is empty and the other is non-empty.\nWhen namespace is specified in dataSourceRef,\ndataSource isn't set to the same value and must be empty.\nThere are three important differences between dataSource and dataSourceRef:\n* While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n(Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef.withApiGroup + +```ts +withApiGroup(apiGroup) +``` + +"APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef.withKind + +```ts +withKind(kind) +``` + +"Kind is the type of resource being referenced" + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef.withName + +```ts +withName(name) +``` + +"Name is the name of resource being referenced" + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace is the namespace of resource being referenced\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled." 
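+
+**Example (sketch):** a generic ephemeral volume whose PVC template requests storage from a named StorageClass; the `resources.withRequests` helper used here is documented just below, and the plural setter is assumed as before.
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder import
+local obj = lib.example;              // placeholder object
+
+local scratch =
+  obj.spec.volumes.withName('scratch')
+  + obj.spec.volumes.ephemeral.volumeClaimTemplate.spec.withAccessModes(['ReadWriteOnce'])
+  + obj.spec.volumes.ephemeral.volumeClaimTemplate.spec.withStorageClassName('standard')
+  + obj.spec.volumes.ephemeral.volumeClaimTemplate.spec.resources.withRequests({ storage: '10Gi' });
+
+obj.spec.withVolumes([scratch])  // assumed plural setter
+```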
+ +## obj spec.volumes.ephemeral.volumeClaimTemplate.spec.resources + +"resources represents the minimum resources the volume should have.\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\nthat are lower than previous value but must still be higher than capacity recorded in the\nstatus field of the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.resources.withLimits + +```ts +withLimits(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.resources.withLimitsMixin + +```ts +withLimitsMixin(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.resources.withRequests + +```ts +withRequests(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.resources.withRequestsMixin + +```ts +withRequestsMixin(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +## obj spec.volumes.ephemeral.volumeClaimTemplate.spec.selector + +"selector is a label query over volumes to consider for binding." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.volumes.fc + +"fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod." + +### fn spec.volumes.fc.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.volumes.fc.withLun + +```ts +withLun(lun) +``` + +"lun is Optional: FC target lun number" + +### fn spec.volumes.fc.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.volumes.fc.withTargetWWNs + +```ts +withTargetWWNs(targetWWNs) +``` + +"targetWWNs is Optional: FC target worldwide names (WWNs)" + +### fn spec.volumes.fc.withTargetWWNsMixin + +```ts +withTargetWWNsMixin(targetWWNs) +``` + +"targetWWNs is Optional: FC target worldwide names (WWNs)" + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.fc.withWwids + +```ts +withWwids(wwids) +``` + +"wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously." + +### fn spec.volumes.fc.withWwidsMixin + +```ts +withWwidsMixin(wwids) +``` + +"wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously." + +**Note:** This function appends passed data to existing values + +## obj spec.volumes.flexVolume + +"flexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin." + +### fn spec.volumes.flexVolume.withDriver + +```ts +withDriver(driver) +``` + +"driver is the name of the driver to use for this volume." 
+ +### fn spec.volumes.flexVolume.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script." + +### fn spec.volumes.flexVolume.withOptions + +```ts +withOptions(options) +``` + +"options is Optional: this field holds extra command options if any." + +### fn spec.volumes.flexVolume.withOptionsMixin + +```ts +withOptionsMixin(options) +``` + +"options is Optional: this field holds extra command options if any." + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.flexVolume.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly is Optional: defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +## obj spec.volumes.flexVolume.secretRef + +"secretRef is Optional: secretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts." + +### fn spec.volumes.flexVolume.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.volumes.flocker + +"flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running" + +### fn spec.volumes.flocker.withDatasetName + +```ts +withDatasetName(datasetName) +``` + +"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker\nshould be considered as deprecated" + +### fn spec.volumes.flocker.withDatasetUUID + +```ts +withDatasetUUID(datasetUUID) +``` + +"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset" + +## obj spec.volumes.gcePersistentDisk + +"gcePersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + +### fn spec.volumes.gcePersistentDisk.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.volumes.gcePersistentDisk.withPartition + +```ts +withPartition(partition) +``` + +"partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + +### fn spec.volumes.gcePersistentDisk.withPdName + +```ts +withPdName(pdName) +``` + +"pdName is unique name of the PD resource in GCE. 
Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + +### fn spec.volumes.gcePersistentDisk.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + +## obj spec.volumes.gitRepo + +"gitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container." + +### fn spec.volumes.gitRepo.withDirectory + +```ts +withDirectory(directory) +``` + +"directory is the target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name." + +### fn spec.volumes.gitRepo.withRepository + +```ts +withRepository(repository) +``` + +"repository is the URL" + +### fn spec.volumes.gitRepo.withRevision + +```ts +withRevision(revision) +``` + +"revision is the commit hash for the specified revision." + +## obj spec.volumes.glusterfs + +"glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md" + +### fn spec.volumes.glusterfs.withEndpoints + +```ts +withEndpoints(endpoints) +``` + +"endpoints is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + +### fn spec.volumes.glusterfs.withPath + +```ts +withPath(path) +``` + +"path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + +### fn spec.volumes.glusterfs.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + +## obj spec.volumes.hostPath + +"hostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write." 
+ +### fn spec.volumes.hostPath.withPath + +```ts +withPath(path) +``` + +"path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + +### fn spec.volumes.hostPath.withType + +```ts +withType(type) +``` + +"type for HostPath Volume\nDefaults to \"\"\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + +## obj spec.volumes.iscsi + +"iscsi represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md" + +### fn spec.volumes.iscsi.withChapAuthDiscovery + +```ts +withChapAuthDiscovery(chapAuthDiscovery) +``` + +"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication" + +### fn spec.volumes.iscsi.withChapAuthSession + +```ts +withChapAuthSession(chapAuthSession) +``` + +"chapAuthSession defines whether support iSCSI Session CHAP authentication" + +### fn spec.volumes.iscsi.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.volumes.iscsi.withInitiatorName + +```ts +withInitiatorName(initiatorName) +``` + +"initiatorName is the custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n: will be created for the connection." + +### fn spec.volumes.iscsi.withIqn + +```ts +withIqn(iqn) +``` + +"iqn is the target iSCSI Qualified Name." + +### fn spec.volumes.iscsi.withIscsiInterface + +```ts +withIscsiInterface(iscsiInterface) +``` + +"iscsiInterface is the interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp)." + +### fn spec.volumes.iscsi.withLun + +```ts +withLun(lun) +``` + +"lun represents iSCSI Target Lun number." + +### fn spec.volumes.iscsi.withPortals + +```ts +withPortals(portals) +``` + +"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + +### fn spec.volumes.iscsi.withPortalsMixin + +```ts +withPortalsMixin(portals) +``` + +"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.iscsi.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false." + +### fn spec.volumes.iscsi.withTargetPortal + +```ts +withTargetPortal(targetPortal) +``` + +"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + +## obj spec.volumes.iscsi.secretRef + +"secretRef is the CHAP Secret for iSCSI target and initiator authentication" + +### fn spec.volumes.iscsi.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
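+
+**Example (sketch):** a hostPath volume built with the helpers above; as the description notes, host paths expose the node's filesystem and are rarely appropriate for ordinary workloads.
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder import
+local obj = lib.example;              // placeholder object
+
+local dockerSock =
+  obj.spec.volumes.withName('docker-sock')
+  + obj.spec.volumes.hostPath.withPath('/var/run/docker.sock')
+  + obj.spec.volumes.hostPath.withType('Socket');
+
+obj.spec.withVolumes([dockerSock])  // assumed plural setter
+```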
+ +## obj spec.volumes.nfs + +"nfs represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + +### fn spec.volumes.nfs.withPath + +```ts +withPath(path) +``` + +"path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + +### fn spec.volumes.nfs.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + +### fn spec.volumes.nfs.withServer + +```ts +withServer(server) +``` + +"server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + +## obj spec.volumes.persistentVolumeClaim + +"persistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + +### fn spec.volumes.persistentVolumeClaim.withClaimName + +```ts +withClaimName(claimName) +``` + +"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + +### fn spec.volumes.persistentVolumeClaim.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly Will force the ReadOnly setting in VolumeMounts.\nDefault false." + +## obj spec.volumes.photonPersistentDisk + +"photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" + +### fn spec.volumes.photonPersistentDisk.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + +### fn spec.volumes.photonPersistentDisk.withPdID + +```ts +withPdID(pdID) +``` + +"pdID is the ID that identifies Photon Controller persistent disk" + +## obj spec.volumes.portworxVolume + +"portworxVolume represents a portworx volume attached and mounted on kubelets host machine" + +### fn spec.volumes.portworxVolume.withFsType + +```ts +withFsType(fsType) +``` + +"fSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." + +### fn spec.volumes.portworxVolume.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.volumes.portworxVolume.withVolumeID + +```ts +withVolumeID(volumeID) +``` + +"volumeID uniquely identifies a Portworx volume" + +## obj spec.volumes.projected + +"projected items for all in one resources secrets, configmaps, and downward API" + +### fn spec.volumes.projected.withDefaultMode + +```ts +withDefaultMode(defaultMode) +``` + +"defaultMode are the mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." 
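+
+**Example (sketch):** mounting an existing PersistentVolumeClaim from the same namespace, using the helpers documented above.
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder import
+local obj = lib.example;              // placeholder object
+
+local data =
+  obj.spec.volumes.withName('data')
+  + obj.spec.volumes.persistentVolumeClaim.withClaimName('my-claim')
+  + obj.spec.volumes.persistentVolumeClaim.withReadOnly(false);
+
+obj.spec.withVolumes([data])  // assumed plural setter
+```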
+ +### fn spec.volumes.projected.withSources + +```ts +withSources(sources) +``` + +"sources is the list of volume projections" + +### fn spec.volumes.projected.withSourcesMixin + +```ts +withSourcesMixin(sources) +``` + +"sources is the list of volume projections" + +**Note:** This function appends passed data to existing values + +## obj spec.volumes.projected.sources + +"sources is the list of volume projections" + +## obj spec.volumes.projected.sources.clusterTrustBundle + +"ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field\nof ClusterTrustBundle objects in an auto-updating file.\n\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\n\nClusterTrustBundle objects can either be selected by name, or by the\ncombination of signer name and a label selector.\n\n\nKubelet performs aggressive normalization of the PEM contents written\ninto the pod filesystem. Esoteric PEM features such as inter-block\ncomments and block headers are stripped. Certificates are deduplicated.\nThe ordering of certificates within the file is arbitrary, and Kubelet\nmay change the order over time." + +### fn spec.volumes.projected.sources.clusterTrustBundle.withName + +```ts +withName(name) +``` + +"Select a single ClusterTrustBundle by object name. Mutually-exclusive\nwith signerName and labelSelector." + +### fn spec.volumes.projected.sources.clusterTrustBundle.withOptional + +```ts +withOptional(optional) +``` + +"If true, don't block pod startup if the referenced ClusterTrustBundle(s)\naren't available. If using name, then the named ClusterTrustBundle is\nallowed not to exist. If using signerName, then the combination of\nsignerName and labelSelector is allowed to match zero\nClusterTrustBundles." + +### fn spec.volumes.projected.sources.clusterTrustBundle.withPath + +```ts +withPath(path) +``` + +"Relative path from the volume root to write the bundle." + +### fn spec.volumes.projected.sources.clusterTrustBundle.withSignerName + +```ts +withSignerName(signerName) +``` + +"Select all ClusterTrustBundles that match this signer name.\nMutually-exclusive with name. The contents of all selected\nClusterTrustBundles will be unified and deduplicated." + +## obj spec.volumes.projected.sources.clusterTrustBundle.labelSelector + +"Select all ClusterTrustBundles that match this label selector. Only has\neffect if signerName is set. Mutually-exclusive with name. If unset,\ninterpreted as \"match nothing\". If set but empty, interpreted as \"match\neverything\"." + +### fn spec.volumes.projected.sources.clusterTrustBundle.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.volumes.projected.sources.clusterTrustBundle.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.projected.sources.clusterTrustBundle.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." 
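+
+**Example (sketch):** a projected volume with a single ClusterTrustBundle source selected by name (alpha, gated by ClusterTrustBundleProjection); the configMap, downwardAPI and secret sources documented below compose the same way.
+
+```jsonnet
+local lib = import 'main.libsonnet';  // placeholder import
+local obj = lib.example;              // placeholder object
+
+local bundleSource =
+  obj.spec.volumes.projected.sources.clusterTrustBundle.withName('example-ca')
+  + obj.spec.volumes.projected.sources.clusterTrustBundle.withPath('ca-certificates.pem');
+
+local trust =
+  obj.spec.volumes.withName('trust-bundle')
+  + obj.spec.volumes.projected.withSources([bundleSource]);
+
+obj.spec.withVolumes([trust])  // assumed plural setter
+```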
+ +### fn spec.volumes.projected.sources.clusterTrustBundle.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.volumes.projected.sources.configMap + +"configMap information about the configMap data to project" + +### fn spec.volumes.projected.sources.configMap.withItems + +```ts +withItems(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.volumes.projected.sources.configMap.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.volumes.projected.sources.configMap.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.volumes.projected.sources.configMap.withOptional + +```ts +withOptional(optional) +``` + +"optional specify whether the ConfigMap or its keys must be defined" + +## obj spec.volumes.projected.sources.configMap.items + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.volumes.projected.sources.configMap.items.withKey + +```ts +withKey(key) +``` + +"key is the key to project." + +### fn spec.volumes.projected.sources.configMap.items.withMode + +```ts +withMode(mode) +``` + +"mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.volumes.projected.sources.configMap.items.withPath + +```ts +withPath(path) +``` + +"path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + +## obj spec.volumes.projected.sources.downwardAPI + +"downwardAPI information about the downwardAPI data to project" + +### fn spec.volumes.projected.sources.downwardAPI.withItems + +```ts +withItems(items) +``` + +"Items is a list of DownwardAPIVolume file" + +### fn spec.volumes.projected.sources.downwardAPI.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"Items is a list of DownwardAPIVolume file" + +**Note:** This function appends passed data to existing values + +## obj spec.volumes.projected.sources.downwardAPI.items + +"Items is a list of DownwardAPIVolume file" + +### fn spec.volumes.projected.sources.downwardAPI.items.withMode + +```ts +withMode(mode) +``` + +"Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.volumes.projected.sources.downwardAPI.items.withPath + +```ts +withPath(path) +``` + +"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'" + +## obj spec.volumes.projected.sources.downwardAPI.items.fieldRef + +"Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported." + +### fn spec.volumes.projected.sources.downwardAPI.items.fieldRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." + +### fn spec.volumes.projected.sources.downwardAPI.items.fieldRef.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"Path of the field to select in the specified API version." + +## obj spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef + +"Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." + +### fn spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef.withContainerName + +```ts +withContainerName(containerName) +``` + +"Container name: required for volumes, optional for env vars" + +### fn spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef.withDivisor + +```ts +withDivisor(divisor) +``` + +"Specifies the output format of the exposed resources, defaults to \"1\"" + +### fn spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef.withResource + +```ts +withResource(resource) +``` + +"Required: resource to select" + +## obj spec.volumes.projected.sources.secret + +"secret information about the secret data to project" + +### fn spec.volumes.projected.sources.secret.withItems + +```ts +withItems(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.volumes.projected.sources.secret.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.projected.sources.secret.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.volumes.projected.sources.secret.withOptional + +```ts +withOptional(optional) +``` + +"optional field specify whether the Secret or its key must be defined" + +## obj spec.volumes.projected.sources.secret.items + +"items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. 
If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.volumes.projected.sources.secret.items.withKey + +```ts +withKey(key) +``` + +"key is the key to project." + +### fn spec.volumes.projected.sources.secret.items.withMode + +```ts +withMode(mode) +``` + +"mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.volumes.projected.sources.secret.items.withPath + +```ts +withPath(path) +``` + +"path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + +## obj spec.volumes.projected.sources.serviceAccountToken + +"serviceAccountToken is information about the serviceAccountToken data to project" + +### fn spec.volumes.projected.sources.serviceAccountToken.withAudience + +```ts +withAudience(audience) +``` + +"audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver." + +### fn spec.volumes.projected.sources.serviceAccountToken.withExpirationSeconds + +```ts +withExpirationSeconds(expirationSeconds) +``` + +"expirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes." + +### fn spec.volumes.projected.sources.serviceAccountToken.withPath + +```ts +withPath(path) +``` + +"path is the path relative to the mount point of the file to project the\ntoken into." + +## obj spec.volumes.quobyte + +"quobyte represents a Quobyte mount on the host that shares a pod's lifetime" + +### fn spec.volumes.quobyte.withGroup + +```ts +withGroup(group) +``` + +"group to map volume access to\nDefault is no group" + +### fn spec.volumes.quobyte.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false." 
+ +### fn spec.volumes.quobyte.withRegistry + +```ts +withRegistry(registry) +``` + +"registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes" + +### fn spec.volumes.quobyte.withTenant + +```ts +withTenant(tenant) +``` + +"tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin" + +### fn spec.volumes.quobyte.withUser + +```ts +withUser(user) +``` + +"user to map volume access to\nDefaults to serivceaccount user" + +### fn spec.volumes.quobyte.withVolume + +```ts +withVolume(volume) +``` + +"volume is a string that references an already created Quobyte volume by name." + +## obj spec.volumes.rbd + +"rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md" + +### fn spec.volumes.rbd.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.volumes.rbd.withImage + +```ts +withImage(image) +``` + +"image is the rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.volumes.rbd.withKeyring + +```ts +withKeyring(keyring) +``` + +"keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.volumes.rbd.withMonitors + +```ts +withMonitors(monitors) +``` + +"monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.volumes.rbd.withMonitorsMixin + +```ts +withMonitorsMixin(monitors) +``` + +"monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.rbd.withPool + +```ts +withPool(pool) +``` + +"pool is the rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.volumes.rbd.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.volumes.rbd.withUser + +```ts +withUser(user) +``` + +"user is the rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +## obj spec.volumes.rbd.secretRef + +"secretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.volumes.rbd.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.volumes.scaleIO + +"scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes." 
+ +### fn spec.volumes.scaleIO.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\"." + +### fn spec.volumes.scaleIO.withGateway + +```ts +withGateway(gateway) +``` + +"gateway is the host address of the ScaleIO API Gateway." + +### fn spec.volumes.scaleIO.withProtectionDomain + +```ts +withProtectionDomain(protectionDomain) +``` + +"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage." + +### fn spec.volumes.scaleIO.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.volumes.scaleIO.withSslEnabled + +```ts +withSslEnabled(sslEnabled) +``` + +"sslEnabled Flag enable/disable SSL communication with Gateway, default false" + +### fn spec.volumes.scaleIO.withStorageMode + +```ts +withStorageMode(storageMode) +``` + +"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned." + +### fn spec.volumes.scaleIO.withStoragePool + +```ts +withStoragePool(storagePool) +``` + +"storagePool is the ScaleIO Storage Pool associated with the protection domain." + +### fn spec.volumes.scaleIO.withSystem + +```ts +withSystem(system) +``` + +"system is the name of the storage system as configured in ScaleIO." + +### fn spec.volumes.scaleIO.withVolumeName + +```ts +withVolumeName(volumeName) +``` + +"volumeName is the name of a volume already created in the ScaleIO system\nthat is associated with this volume source." + +## obj spec.volumes.scaleIO.secretRef + +"secretRef references to the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail." + +### fn spec.volumes.scaleIO.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.volumes.secret + +"secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + +### fn spec.volumes.secret.withDefaultMode + +```ts +withDefaultMode(defaultMode) +``` + +"defaultMode is Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.volumes.secret.withItems + +```ts +withItems(items) +``` + +"items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." 
+ +### fn spec.volumes.secret.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +**Note:** This function appends passed data to existing values + +### fn spec.volumes.secret.withOptional + +```ts +withOptional(optional) +``` + +"optional field specify whether the Secret or its keys must be defined" + +### fn spec.volumes.secret.withSecretName + +```ts +withSecretName(secretName) +``` + +"secretName is the name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + +## obj spec.volumes.secret.items + +"items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.volumes.secret.items.withKey + +```ts +withKey(key) +``` + +"key is the key to project." + +### fn spec.volumes.secret.items.withMode + +```ts +withMode(mode) +``` + +"mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.volumes.secret.items.withPath + +```ts +withPath(path) +``` + +"path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + +## obj spec.volumes.storageos + +"storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes." + +### fn spec.volumes.storageos.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + +### fn spec.volumes.storageos.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.volumes.storageos.withVolumeName + +```ts +withVolumeName(volumeName) +``` + +"volumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace." + +### fn spec.volumes.storageos.withVolumeNamespace + +```ts +withVolumeNamespace(volumeNamespace) +``` + +"volumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. 
This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created." + +## obj spec.volumes.storageos.secretRef + +"secretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted." + +### fn spec.volumes.storageos.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.volumes.vsphereVolume + +"vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine" + +### fn spec.volumes.vsphereVolume.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + +### fn spec.volumes.vsphereVolume.withStoragePolicyID + +```ts +withStoragePolicyID(storagePolicyID) +``` + +"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName." + +### fn spec.volumes.vsphereVolume.withStoragePolicyName + +```ts +withStoragePolicyName(storagePolicyName) +``` + +"storagePolicyName is the storage Policy Based Management (SPBM) profile name." + +### fn spec.volumes.vsphereVolume.withVolumePath + +```ts +withVolumePath(volumePath) +``` + +"volumePath is the path that identifies vSphere volume vmdk" \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1alpha1/index.md b/docs/crossplane/1.17/pkg/v1alpha1/index.md new file mode 100644 index 0000000..afdc113 --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1alpha1/index.md @@ -0,0 +1,9 @@ +--- +permalink: /crossplane/1.17/pkg/v1alpha1/ +--- + +# pkg.v1alpha1 + + + +* [controllerConfig](controllerConfig.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1beta1/deploymentRuntimeConfig.md b/docs/crossplane/1.17/pkg/v1beta1/deploymentRuntimeConfig.md new file mode 100644 index 0000000..09fcf5a --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1beta1/deploymentRuntimeConfig.md @@ -0,0 +1,11245 @@ +--- +permalink: /crossplane/1.17/pkg/v1beta1/deploymentRuntimeConfig/ +--- + +# pkg.v1beta1.deploymentRuntimeConfig + +"The DeploymentRuntimeConfig provides settings for the Kubernetes Deployment\nof a Provider or composition function package.\n\n\nRead the Crossplane documentation for\n[more information about DeploymentRuntimeConfigs](https://docs.crossplane.io/latest/concepts/providers/#runtime-configuration)." 
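+
+Before the index, a minimal usage sketch may help orient readers: each generated `with*` function returns an object fragment, and fragments are merged with Jsonnet's `+` operator. The import path and all field values below are illustrative assumptions, not part of the generated API.
+
+```jsonnet
+// Minimal sketch; the import path is assumed and may differ in your vendoring setup.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+local drc = crossplane.pkg.v1beta1.deploymentRuntimeConfig;
+
+// Compose a DeploymentRuntimeConfig by merging the fragments the builders return.
+drc.new('provider-runtime')
+  + drc.metadata.withAnnotations({ 'example.com/owner': 'platform-team' })
+  + drc.spec.deploymentTemplate.spec.withReplicas(2)
+  + drc.spec.deploymentTemplate.spec.template.spec.withNodeSelector({ 'kubernetes.io/os': 'linux' })
+```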
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`obj spec.deploymentTemplate`](#obj-specdeploymenttemplate) + * [`obj spec.deploymentTemplate.metadata`](#obj-specdeploymenttemplatemetadata) + * [`fn withAnnotations(annotations)`](#fn-specdeploymenttemplatemetadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-specdeploymenttemplatemetadatawithannotationsmixin) + * [`fn withLabels(labels)`](#fn-specdeploymenttemplatemetadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-specdeploymenttemplatemetadatawithlabelsmixin) + * [`fn withName(name)`](#fn-specdeploymenttemplatemetadatawithname) + * [`obj spec.deploymentTemplate.spec`](#obj-specdeploymenttemplatespec) + * [`fn withMinReadySeconds(minReadySeconds)`](#fn-specdeploymenttemplatespecwithminreadyseconds) + * [`fn withPaused(paused)`](#fn-specdeploymenttemplatespecwithpaused) + * [`fn withProgressDeadlineSeconds(progressDeadlineSeconds)`](#fn-specdeploymenttemplatespecwithprogressdeadlineseconds) + * [`fn withReplicas(replicas)`](#fn-specdeploymenttemplatespecwithreplicas) + * [`fn withRevisionHistoryLimit(revisionHistoryLimit)`](#fn-specdeploymenttemplatespecwithrevisionhistorylimit) + * [`obj spec.deploymentTemplate.spec.selector`](#obj-specdeploymenttemplatespecselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespecselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespecselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespecselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespecselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.selector.matchExpressions`](#obj-specdeploymenttemplatespecselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespecselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespecselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespecselectormatchexpressionswithvalues) + * [`fn 
withValuesMixin(values)`](#fn-specdeploymenttemplatespecselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.strategy`](#obj-specdeploymenttemplatespecstrategy) + * [`fn withType(type)`](#fn-specdeploymenttemplatespecstrategywithtype) + * [`obj spec.deploymentTemplate.spec.strategy.rollingUpdate`](#obj-specdeploymenttemplatespecstrategyrollingupdate) + * [`fn withMaxSurge(maxSurge)`](#fn-specdeploymenttemplatespecstrategyrollingupdatewithmaxsurge) + * [`fn withMaxUnavailable(maxUnavailable)`](#fn-specdeploymenttemplatespecstrategyrollingupdatewithmaxunavailable) + * [`obj spec.deploymentTemplate.spec.template`](#obj-specdeploymenttemplatespectemplate) + * [`obj spec.deploymentTemplate.spec.template.metadata`](#obj-specdeploymenttemplatespectemplatemetadata) + * [`fn withAnnotations(annotations)`](#fn-specdeploymenttemplatespectemplatemetadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-specdeploymenttemplatespectemplatemetadatawithannotationsmixin) + * [`fn withFinalizers(finalizers)`](#fn-specdeploymenttemplatespectemplatemetadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-specdeploymenttemplatespectemplatemetadatawithfinalizersmixin) + * [`fn withLabels(labels)`](#fn-specdeploymenttemplatespectemplatemetadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-specdeploymenttemplatespectemplatemetadatawithlabelsmixin) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatemetadatawithname) + * [`fn withNamespace(namespace)`](#fn-specdeploymenttemplatespectemplatemetadatawithnamespace) + * [`obj spec.deploymentTemplate.spec.template.spec`](#obj-specdeploymenttemplatespectemplatespec) + * [`fn withActiveDeadlineSeconds(activeDeadlineSeconds)`](#fn-specdeploymenttemplatespectemplatespecwithactivedeadlineseconds) + * [`fn withAutomountServiceAccountToken(automountServiceAccountToken)`](#fn-specdeploymenttemplatespectemplatespecwithautomountserviceaccounttoken) + * [`fn withContainers(containers)`](#fn-specdeploymenttemplatespectemplatespecwithcontainers) + * [`fn withContainersMixin(containers)`](#fn-specdeploymenttemplatespectemplatespecwithcontainersmixin) + * [`fn withDnsPolicy(dnsPolicy)`](#fn-specdeploymenttemplatespectemplatespecwithdnspolicy) + * [`fn withEnableServiceLinks(enableServiceLinks)`](#fn-specdeploymenttemplatespectemplatespecwithenableservicelinks) + * [`fn withEphemeralContainers(ephemeralContainers)`](#fn-specdeploymenttemplatespectemplatespecwithephemeralcontainers) + * [`fn withEphemeralContainersMixin(ephemeralContainers)`](#fn-specdeploymenttemplatespectemplatespecwithephemeralcontainersmixin) + * [`fn withHostAliases(hostAliases)`](#fn-specdeploymenttemplatespectemplatespecwithhostaliases) + * [`fn withHostAliasesMixin(hostAliases)`](#fn-specdeploymenttemplatespectemplatespecwithhostaliasesmixin) + * [`fn withHostIPC(hostIPC)`](#fn-specdeploymenttemplatespectemplatespecwithhostipc) + * [`fn withHostNetwork(hostNetwork)`](#fn-specdeploymenttemplatespectemplatespecwithhostnetwork) + * [`fn withHostPID(hostPID)`](#fn-specdeploymenttemplatespectemplatespecwithhostpid) + * [`fn withHostUsers(hostUsers)`](#fn-specdeploymenttemplatespectemplatespecwithhostusers) + * [`fn withHostname(hostname)`](#fn-specdeploymenttemplatespectemplatespecwithhostname) + * [`fn withImagePullSecrets(imagePullSecrets)`](#fn-specdeploymenttemplatespectemplatespecwithimagepullsecrets) + * [`fn withImagePullSecretsMixin(imagePullSecrets)`](#fn-specdeploymenttemplatespectemplatespecwithimagepullsecretsmixin) + * [`fn 
withInitContainers(initContainers)`](#fn-specdeploymenttemplatespectemplatespecwithinitcontainers) + * [`fn withInitContainersMixin(initContainers)`](#fn-specdeploymenttemplatespectemplatespecwithinitcontainersmixin) + * [`fn withNodeName(nodeName)`](#fn-specdeploymenttemplatespectemplatespecwithnodename) + * [`fn withNodeSelector(nodeSelector)`](#fn-specdeploymenttemplatespectemplatespecwithnodeselector) + * [`fn withNodeSelectorMixin(nodeSelector)`](#fn-specdeploymenttemplatespectemplatespecwithnodeselectormixin) + * [`fn withOverhead(overhead)`](#fn-specdeploymenttemplatespectemplatespecwithoverhead) + * [`fn withOverheadMixin(overhead)`](#fn-specdeploymenttemplatespectemplatespecwithoverheadmixin) + * [`fn withPreemptionPolicy(preemptionPolicy)`](#fn-specdeploymenttemplatespectemplatespecwithpreemptionpolicy) + * [`fn withPriority(priority)`](#fn-specdeploymenttemplatespectemplatespecwithpriority) + * [`fn withPriorityClassName(priorityClassName)`](#fn-specdeploymenttemplatespectemplatespecwithpriorityclassname) + * [`fn withReadinessGates(readinessGates)`](#fn-specdeploymenttemplatespectemplatespecwithreadinessgates) + * [`fn withReadinessGatesMixin(readinessGates)`](#fn-specdeploymenttemplatespectemplatespecwithreadinessgatesmixin) + * [`fn withResourceClaims(resourceClaims)`](#fn-specdeploymenttemplatespectemplatespecwithresourceclaims) + * [`fn withResourceClaimsMixin(resourceClaims)`](#fn-specdeploymenttemplatespectemplatespecwithresourceclaimsmixin) + * [`fn withRestartPolicy(restartPolicy)`](#fn-specdeploymenttemplatespectemplatespecwithrestartpolicy) + * [`fn withRuntimeClassName(runtimeClassName)`](#fn-specdeploymenttemplatespectemplatespecwithruntimeclassname) + * [`fn withSchedulerName(schedulerName)`](#fn-specdeploymenttemplatespectemplatespecwithschedulername) + * [`fn withSchedulingGates(schedulingGates)`](#fn-specdeploymenttemplatespectemplatespecwithschedulinggates) + * [`fn withSchedulingGatesMixin(schedulingGates)`](#fn-specdeploymenttemplatespectemplatespecwithschedulinggatesmixin) + * [`fn withServiceAccount(serviceAccount)`](#fn-specdeploymenttemplatespectemplatespecwithserviceaccount) + * [`fn withServiceAccountName(serviceAccountName)`](#fn-specdeploymenttemplatespectemplatespecwithserviceaccountname) + * [`fn withSetHostnameAsFQDN(setHostnameAsFQDN)`](#fn-specdeploymenttemplatespectemplatespecwithsethostnameasfqdn) + * [`fn withShareProcessNamespace(shareProcessNamespace)`](#fn-specdeploymenttemplatespectemplatespecwithshareprocessnamespace) + * [`fn withSubdomain(subdomain)`](#fn-specdeploymenttemplatespectemplatespecwithsubdomain) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespecwithterminationgraceperiodseconds) + * [`fn withTolerations(tolerations)`](#fn-specdeploymenttemplatespectemplatespecwithtolerations) + * [`fn withTolerationsMixin(tolerations)`](#fn-specdeploymenttemplatespectemplatespecwithtolerationsmixin) + * [`fn withTopologySpreadConstraints(topologySpreadConstraints)`](#fn-specdeploymenttemplatespectemplatespecwithtopologyspreadconstraints) + * [`fn withTopologySpreadConstraintsMixin(topologySpreadConstraints)`](#fn-specdeploymenttemplatespectemplatespecwithtopologyspreadconstraintsmixin) + * [`fn withVolumes(volumes)`](#fn-specdeploymenttemplatespectemplatespecwithvolumes) + * [`fn withVolumesMixin(volumes)`](#fn-specdeploymenttemplatespectemplatespecwithvolumesmixin) + * [`obj 
spec.deploymentTemplate.spec.template.spec.affinity`](#obj-specdeploymenttemplatespectemplatespecaffinity) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity`](#obj-specdeploymenttemplatespectemplatespecaffinitynodeaffinity) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitywithpreferredduringschedulingignoredduringexecution) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitywithpreferredduringschedulingignoredduringexecutionmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution`](#obj-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecution) + * [`fn withWeight(weight)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionwithweight) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference`](#obj-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreference) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencewithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencewithmatchexpressionsmixin) + * [`fn withMatchFields(matchFields)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencewithmatchfields) + * [`fn withMatchFieldsMixin(matchFields)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencewithmatchfieldsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields`](#obj-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfields) + * [`fn 
withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfieldswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfieldswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfieldswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionpreferencematchfieldswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution`](#obj-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecution) + * [`fn withNodeSelectorTerms(nodeSelectorTerms)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionwithnodeselectorterms) + * [`fn withNodeSelectorTermsMixin(nodeSelectorTerms)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionwithnodeselectortermsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms`](#obj-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectorterms) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermswithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermswithmatchexpressionsmixin) + * [`fn withMatchFields(matchFields)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermswithmatchfields) + * [`fn withMatchFieldsMixin(matchFields)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermswithmatchfieldsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchexpressionswithvaluesmixin) + * [`obj 
spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields`](#obj-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfields) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfieldswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfieldswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfieldswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsmatchfieldswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinity) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitywithpreferredduringschedulingignoredduringexecution) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitywithpreferredduringschedulingignoredduringexecutionmixin) + * [`fn withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitywithrequiredduringschedulingignoredduringexecution) + * [`fn withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitywithrequiredduringschedulingignoredduringexecutionmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecution) + * [`fn withWeight(weight)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionwithweight) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinityterm) + * [`fn withMatchLabelKeys(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmatchlabelkeys) + * [`fn withMatchLabelKeysMixin(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmatchlabelkeysmixin) + * [`fn withMismatchLabelKeys(mismatchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmismatchlabelkeys) + * [`fn withMismatchLabelKeysMixin(mismatchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmismatchlabelkeysmixin) + * [`fn 
withNamespaces(namespaces)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithnamespaces) + * [`fn withNamespacesMixin(namespaces)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithnamespacesmixin) + * [`fn withTopologyKey(topologyKey)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithtopologykey) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchexpressionsmixin) + * [`fn 
withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecution) + * [`fn withMatchLabelKeys(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithmatchlabelkeys) + * [`fn withMatchLabelKeysMixin(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithmatchlabelkeysmixin) + * [`fn withMismatchLabelKeys(mismatchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithmismatchlabelkeys) + * [`fn withMismatchLabelKeysMixin(mismatchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithmismatchlabelkeysmixin) + * [`fn withNamespaces(namespaces)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithnamespaces) + * [`fn withNamespacesMixin(namespaces)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithnamespacesmixin) + * [`fn withTopologyKey(topologyKey)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionwithtopologykey) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchexpressions) + * [`fn 
withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithvalues) + * [`fn 
withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinity) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitywithpreferredduringschedulingignoredduringexecution) + * [`fn withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitywithpreferredduringschedulingignoredduringexecutionmixin) + * [`fn withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitywithrequiredduringschedulingignoredduringexecution) + * [`fn withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitywithrequiredduringschedulingignoredduringexecutionmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecution) + * [`fn withWeight(weight)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionwithweight) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinityterm) + * [`fn withMatchLabelKeys(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmatchlabelkeys) + * [`fn withMatchLabelKeysMixin(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmatchlabelkeysmixin) + * [`fn withMismatchLabelKeys(mismatchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmismatchlabelkeys) + * [`fn withMismatchLabelKeysMixin(mismatchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithmismatchlabelkeysmixin) + * [`fn withNamespaces(namespaces)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithnamespaces) + * [`fn withNamespacesMixin(namespaces)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithnamespacesmixin) + * [`fn withTopologyKey(topologyKey)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermwithtopologykey) + * [`obj 
spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermlabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectorwithmatchlabelsmixin) + * [`obj 
spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionpodaffinitytermnamespaceselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecution) + * [`fn withMatchLabelKeys(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithmatchlabelkeys) + * [`fn withMatchLabelKeysMixin(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithmatchlabelkeysmixin) + * [`fn withMismatchLabelKeys(mismatchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithmismatchlabelkeys) + * [`fn withMismatchLabelKeysMixin(mismatchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithmismatchlabelkeysmixin) + * [`fn withNamespaces(namespaces)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithnamespaces) + * [`fn withNamespacesMixin(namespaces)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithnamespacesmixin) + * [`fn withTopologyKey(topologyKey)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionwithtopologykey) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchlabels) + * [`fn 
withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionlabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionnamespaceselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.containers`](#obj-specdeploymenttemplatespectemplatespeccontainers) + * [`fn withArgs(args)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithargs) + 
* [`fn withArgsMixin(args)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithargsmixin) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithcommandmixin) + * [`fn withEnv(env)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithenv) + * [`fn withEnvFrom(envFrom)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithenvfrom) + * [`fn withEnvFromMixin(envFrom)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithenvfrommixin) + * [`fn withEnvMixin(env)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithenvmixin) + * [`fn withImage(image)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithimage) + * [`fn withImagePullPolicy(imagePullPolicy)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithimagepullpolicy) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithname) + * [`fn withPorts(ports)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithports) + * [`fn withPortsMixin(ports)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithportsmixin) + * [`fn withResizePolicy(resizePolicy)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithresizepolicy) + * [`fn withResizePolicyMixin(resizePolicy)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithresizepolicymixin) + * [`fn withRestartPolicy(restartPolicy)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithrestartpolicy) + * [`fn withStdin(stdin)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithstdin) + * [`fn withStdinOnce(stdinOnce)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithstdinonce) + * [`fn withTerminationMessagePath(terminationMessagePath)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithterminationmessagepath) + * [`fn withTerminationMessagePolicy(terminationMessagePolicy)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithterminationmessagepolicy) + * [`fn withTty(tty)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithtty) + * [`fn withVolumeDevices(volumeDevices)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithvolumedevices) + * [`fn withVolumeDevicesMixin(volumeDevices)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithvolumedevicesmixin) + * [`fn withVolumeMounts(volumeMounts)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithvolumemounts) + * [`fn withVolumeMountsMixin(volumeMounts)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithvolumemountsmixin) + * [`fn withWorkingDir(workingDir)`](#fn-specdeploymenttemplatespectemplatespeccontainerswithworkingdir) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.env`](#obj-specdeploymenttemplatespectemplatespeccontainersenv) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvwithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvwithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom`](#obj-specdeploymenttemplatespectemplatespeccontainersenvvaluefrom) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.configMapKeyRef`](#obj-specdeploymenttemplatespectemplatespeccontainersenvvaluefromconfigmapkeyref) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromconfigmapkeyrefwithkey) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromconfigmapkeyrefwithname) + * [`fn 
withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromconfigmapkeyrefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.fieldRef`](#obj-specdeploymenttemplatespectemplatespeccontainersenvvaluefromfieldref) + * [`fn withApiVersion(apiVersion)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromfieldrefwithapiversion) + * [`fn withFieldPath(fieldPath)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromfieldrefwithfieldpath) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.resourceFieldRef`](#obj-specdeploymenttemplatespectemplatespeccontainersenvvaluefromresourcefieldref) + * [`fn withContainerName(containerName)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromresourcefieldrefwithcontainername) + * [`fn withDivisor(divisor)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromresourcefieldrefwithdivisor) + * [`fn withResource(resource)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromresourcefieldrefwithresource) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.secretKeyRef`](#obj-specdeploymenttemplatespectemplatespeccontainersenvvaluefromsecretkeyref) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromsecretkeyrefwithkey) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromsecretkeyrefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvvaluefromsecretkeyrefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.envFrom`](#obj-specdeploymenttemplatespectemplatespeccontainersenvfrom) + * [`fn withPrefix(prefix)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvfromwithprefix) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.envFrom.configMapRef`](#obj-specdeploymenttemplatespectemplatespeccontainersenvfromconfigmapref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvfromconfigmaprefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvfromconfigmaprefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.envFrom.secretRef`](#obj-specdeploymenttemplatespectemplatespeccontainersenvfromsecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvfromsecretrefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespeccontainersenvfromsecretrefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecycle) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststart) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.exec`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststartexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststartexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststartexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpget) + * [`fn 
withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarthttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.sleep`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststartsleep) + * [`fn withSeconds(seconds)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststartsleepwithseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.tcpSocket`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarttcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarttcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecyclepoststarttcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecycleprestop) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.exec`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecycleprestopexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestopexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestopexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpgethttpheaders) + * [`fn 
withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestophttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.sleep`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecycleprestopsleep) + * [`fn withSeconds(seconds)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestopsleepwithseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.tcpSocket`](#obj-specdeploymenttemplatespectemplatespeccontainerslifecycleprestoptcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestoptcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainerslifecycleprestoptcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe`](#obj-specdeploymenttemplatespectemplatespeccontainerslivenessprobe) + * [`fn withFailureThreshold(failureThreshold)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobewithfailurethreshold) + * [`fn withInitialDelaySeconds(initialDelaySeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobewithinitialdelayseconds) + * [`fn withPeriodSeconds(periodSeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobewithperiodseconds) + * [`fn withSuccessThreshold(successThreshold)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobewithsuccessthreshold) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobewithterminationgraceperiodseconds) + * [`fn withTimeoutSeconds(timeoutSeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobewithtimeoutseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.exec`](#obj-specdeploymenttemplatespectemplatespeccontainerslivenessprobeexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobeexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobeexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.grpc`](#obj-specdeploymenttemplatespectemplatespeccontainerslivenessprobegrpc) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobegrpcwithport) + * [`fn withService(service)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobegrpcwithservice) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet`](#obj-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpgetwithport) + * [`fn 
withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobehttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.tcpSocket`](#obj-specdeploymenttemplatespectemplatespeccontainerslivenessprobetcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobetcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainerslivenessprobetcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.ports`](#obj-specdeploymenttemplatespectemplatespeccontainersports) + * [`fn withContainerPort(containerPort)`](#fn-specdeploymenttemplatespectemplatespeccontainersportswithcontainerport) + * [`fn withHostIP(hostIP)`](#fn-specdeploymenttemplatespectemplatespeccontainersportswithhostip) + * [`fn withHostPort(hostPort)`](#fn-specdeploymenttemplatespectemplatespeccontainersportswithhostport) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersportswithname) + * [`fn withProtocol(protocol)`](#fn-specdeploymenttemplatespectemplatespeccontainersportswithprotocol) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe`](#obj-specdeploymenttemplatespectemplatespeccontainersreadinessprobe) + * [`fn withFailureThreshold(failureThreshold)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobewithfailurethreshold) + * [`fn withInitialDelaySeconds(initialDelaySeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobewithinitialdelayseconds) + * [`fn withPeriodSeconds(periodSeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobewithperiodseconds) + * [`fn withSuccessThreshold(successThreshold)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobewithsuccessthreshold) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobewithterminationgraceperiodseconds) + * [`fn withTimeoutSeconds(timeoutSeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobewithtimeoutseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.exec`](#obj-specdeploymenttemplatespectemplatespeccontainersreadinessprobeexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobeexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobeexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.grpc`](#obj-specdeploymenttemplatespectemplatespeccontainersreadinessprobegrpc) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobegrpcwithport) + * [`fn withService(service)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobegrpcwithservice) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet`](#obj-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpget) + * [`fn 
withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobehttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.tcpSocket`](#obj-specdeploymenttemplatespectemplatespeccontainersreadinessprobetcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobetcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainersreadinessprobetcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.resizePolicy`](#obj-specdeploymenttemplatespectemplatespeccontainersresizepolicy) + * [`fn withResourceName(resourceName)`](#fn-specdeploymenttemplatespectemplatespeccontainersresizepolicywithresourcename) + * [`fn withRestartPolicy(restartPolicy)`](#fn-specdeploymenttemplatespectemplatespeccontainersresizepolicywithrestartpolicy) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.resources`](#obj-specdeploymenttemplatespectemplatespeccontainersresources) + * [`fn withClaims(claims)`](#fn-specdeploymenttemplatespectemplatespeccontainersresourceswithclaims) + * [`fn withClaimsMixin(claims)`](#fn-specdeploymenttemplatespectemplatespeccontainersresourceswithclaimsmixin) + * [`fn withLimits(limits)`](#fn-specdeploymenttemplatespectemplatespeccontainersresourceswithlimits) + * [`fn withLimitsMixin(limits)`](#fn-specdeploymenttemplatespectemplatespeccontainersresourceswithlimitsmixin) + * [`fn withRequests(requests)`](#fn-specdeploymenttemplatespectemplatespeccontainersresourceswithrequests) + * [`fn withRequestsMixin(requests)`](#fn-specdeploymenttemplatespectemplatespeccontainersresourceswithrequestsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.resources.claims`](#obj-specdeploymenttemplatespectemplatespeccontainersresourcesclaims) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersresourcesclaimswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.securityContext`](#obj-specdeploymenttemplatespectemplatespeccontainerssecuritycontext) + * [`fn withAllowPrivilegeEscalation(allowPrivilegeEscalation)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwithallowprivilegeescalation) + * [`fn withPrivileged(privileged)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwithprivileged) + * [`fn withProcMount(procMount)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwithprocmount) + * [`fn 
withReadOnlyRootFilesystem(readOnlyRootFilesystem)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwithreadonlyrootfilesystem) + * [`fn withRunAsGroup(runAsGroup)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwithrunasgroup) + * [`fn withRunAsNonRoot(runAsNonRoot)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwithrunasnonroot) + * [`fn withRunAsUser(runAsUser)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwithrunasuser) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.appArmorProfile`](#obj-specdeploymenttemplatespectemplatespeccontainerssecuritycontextapparmorprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextapparmorprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextapparmorprofilewithtype) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.capabilities`](#obj-specdeploymenttemplatespectemplatespeccontainerssecuritycontextcapabilities) + * [`fn withAdd(add)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextcapabilitieswithadd) + * [`fn withAddMixin(add)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextcapabilitieswithaddmixin) + * [`fn withDrop(drop)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextcapabilitieswithdrop) + * [`fn withDropMixin(drop)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextcapabilitieswithdropmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.seLinuxOptions`](#obj-specdeploymenttemplatespectemplatespeccontainerssecuritycontextselinuxoptions) + * [`fn withLevel(level)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextselinuxoptionswithlevel) + * [`fn withRole(role)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextselinuxoptionswithrole) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextselinuxoptionswithtype) + * [`fn withUser(user)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextselinuxoptionswithuser) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.seccompProfile`](#obj-specdeploymenttemplatespectemplatespeccontainerssecuritycontextseccompprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextseccompprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextseccompprofilewithtype) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.windowsOptions`](#obj-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwindowsoptions) + * [`fn withGmsaCredentialSpec(gmsaCredentialSpec)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwindowsoptionswithgmsacredentialspec) + * [`fn withGmsaCredentialSpecName(gmsaCredentialSpecName)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwindowsoptionswithgmsacredentialspecname) + * [`fn withHostProcess(hostProcess)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwindowsoptionswithhostprocess) + * [`fn withRunAsUserName(runAsUserName)`](#fn-specdeploymenttemplatespectemplatespeccontainerssecuritycontextwindowsoptionswithrunasusername) + * [`obj 
spec.deploymentTemplate.spec.template.spec.containers.startupProbe`](#obj-specdeploymenttemplatespectemplatespeccontainersstartupprobe) + * [`fn withFailureThreshold(failureThreshold)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobewithfailurethreshold) + * [`fn withInitialDelaySeconds(initialDelaySeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobewithinitialdelayseconds) + * [`fn withPeriodSeconds(periodSeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobewithperiodseconds) + * [`fn withSuccessThreshold(successThreshold)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobewithsuccessthreshold) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobewithterminationgraceperiodseconds) + * [`fn withTimeoutSeconds(timeoutSeconds)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobewithtimeoutseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.exec`](#obj-specdeploymenttemplatespectemplatespeccontainersstartupprobeexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobeexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobeexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.grpc`](#obj-specdeploymenttemplatespectemplatespeccontainersstartupprobegrpc) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobegrpcwithport) + * [`fn withService(service)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobegrpcwithservice) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet`](#obj-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobehttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.tcpSocket`](#obj-specdeploymenttemplatespectemplatespeccontainersstartupprobetcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobetcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespeccontainersstartupprobetcpsocketwithport) + * [`obj 
spec.deploymentTemplate.spec.template.spec.containers.volumeDevices`](#obj-specdeploymenttemplatespectemplatespeccontainersvolumedevices) + * [`fn withDevicePath(devicePath)`](#fn-specdeploymenttemplatespectemplatespeccontainersvolumedeviceswithdevicepath) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersvolumedeviceswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.containers.volumeMounts`](#obj-specdeploymenttemplatespectemplatespeccontainersvolumemounts) + * [`fn withMountPath(mountPath)`](#fn-specdeploymenttemplatespectemplatespeccontainersvolumemountswithmountpath) + * [`fn withMountPropagation(mountPropagation)`](#fn-specdeploymenttemplatespectemplatespeccontainersvolumemountswithmountpropagation) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespeccontainersvolumemountswithname) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespeccontainersvolumemountswithreadonly) + * [`fn withRecursiveReadOnly(recursiveReadOnly)`](#fn-specdeploymenttemplatespectemplatespeccontainersvolumemountswithrecursivereadonly) + * [`fn withSubPath(subPath)`](#fn-specdeploymenttemplatespectemplatespeccontainersvolumemountswithsubpath) + * [`fn withSubPathExpr(subPathExpr)`](#fn-specdeploymenttemplatespectemplatespeccontainersvolumemountswithsubpathexpr) + * [`obj spec.deploymentTemplate.spec.template.spec.dnsConfig`](#obj-specdeploymenttemplatespectemplatespecdnsconfig) + * [`fn withNameservers(nameservers)`](#fn-specdeploymenttemplatespectemplatespecdnsconfigwithnameservers) + * [`fn withNameserversMixin(nameservers)`](#fn-specdeploymenttemplatespectemplatespecdnsconfigwithnameserversmixin) + * [`fn withOptions(options)`](#fn-specdeploymenttemplatespectemplatespecdnsconfigwithoptions) + * [`fn withOptionsMixin(options)`](#fn-specdeploymenttemplatespectemplatespecdnsconfigwithoptionsmixin) + * [`fn withSearches(searches)`](#fn-specdeploymenttemplatespectemplatespecdnsconfigwithsearches) + * [`fn withSearchesMixin(searches)`](#fn-specdeploymenttemplatespectemplatespecdnsconfigwithsearchesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.dnsConfig.options`](#obj-specdeploymenttemplatespectemplatespecdnsconfigoptions) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecdnsconfigoptionswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecdnsconfigoptionswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainers) + * [`fn withArgs(args)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithargs) + * [`fn withArgsMixin(args)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithargsmixin) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithcommandmixin) + * [`fn withEnv(env)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithenv) + * [`fn withEnvFrom(envFrom)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithenvfrom) + * [`fn withEnvFromMixin(envFrom)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithenvfrommixin) + * [`fn withEnvMixin(env)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithenvmixin) + * [`fn withImage(image)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithimage) + * [`fn 
withImagePullPolicy(imagePullPolicy)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithimagepullpolicy) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithname) + * [`fn withPorts(ports)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithports) + * [`fn withPortsMixin(ports)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithportsmixin) + * [`fn withResizePolicy(resizePolicy)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithresizepolicy) + * [`fn withResizePolicyMixin(resizePolicy)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithresizepolicymixin) + * [`fn withRestartPolicy(restartPolicy)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithrestartpolicy) + * [`fn withStdin(stdin)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithstdin) + * [`fn withStdinOnce(stdinOnce)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithstdinonce) + * [`fn withTargetContainerName(targetContainerName)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithtargetcontainername) + * [`fn withTerminationMessagePath(terminationMessagePath)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithterminationmessagepath) + * [`fn withTerminationMessagePolicy(terminationMessagePolicy)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithterminationmessagepolicy) + * [`fn withTty(tty)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithtty) + * [`fn withVolumeDevices(volumeDevices)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithvolumedevices) + * [`fn withVolumeDevicesMixin(volumeDevices)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithvolumedevicesmixin) + * [`fn withVolumeMounts(volumeMounts)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithvolumemounts) + * [`fn withVolumeMountsMixin(volumeMounts)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithvolumemountsmixin) + * [`fn withWorkingDir(workingDir)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerswithworkingdir) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersenv) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvwithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvwithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefrom) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.configMapKeyRef`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromconfigmapkeyref) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromconfigmapkeyrefwithkey) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromconfigmapkeyrefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromconfigmapkeyrefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.fieldRef`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromfieldref) + * [`fn 
withApiVersion(apiVersion)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromfieldrefwithapiversion) + * [`fn withFieldPath(fieldPath)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromfieldrefwithfieldpath) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.resourceFieldRef`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromresourcefieldref) + * [`fn withContainerName(containerName)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromresourcefieldrefwithcontainername) + * [`fn withDivisor(divisor)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromresourcefieldrefwithdivisor) + * [`fn withResource(resource)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromresourcefieldrefwithresource) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.secretKeyRef`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromsecretkeyref) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromsecretkeyrefwithkey) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromsecretkeyrefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvvaluefromsecretkeyrefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersenvfrom) + * [`fn withPrefix(prefix)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvfromwithprefix) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom.configMapRef`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersenvfromconfigmapref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvfromconfigmaprefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvfromconfigmaprefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom.secretRef`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersenvfromsecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvfromsecretrefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersenvfromsecretrefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycle) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststart) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.exec`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststartexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststartexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststartexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpget) + * [`fn 
withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarthttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.sleep`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststartsleep) + * [`fn withSeconds(seconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststartsleepwithseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarttcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarttcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecyclepoststarttcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestop) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.exec`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestopexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestopexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestopexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpgetwithport) + * [`fn 
withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestophttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.sleep`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestopsleep) + * [`fn withSeconds(seconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestopsleepwithseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestoptcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestoptcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslifecycleprestoptcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobe) + * [`fn withFailureThreshold(failureThreshold)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobewithfailurethreshold) + * [`fn withInitialDelaySeconds(initialDelaySeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobewithinitialdelayseconds) + * [`fn withPeriodSeconds(periodSeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobewithperiodseconds) + * [`fn withSuccessThreshold(successThreshold)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobewithsuccessthreshold) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobewithterminationgraceperiodseconds) + * [`fn withTimeoutSeconds(timeoutSeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobewithtimeoutseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.exec`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobeexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobeexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobeexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.grpc`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobegrpc) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobegrpcwithport) + * [`fn withService(service)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobegrpcwithservice) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpget) + * [`fn 
withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobehttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobetcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobetcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerslivenessprobetcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.ports`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersports) + * [`fn withContainerPort(containerPort)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersportswithcontainerport) + * [`fn withHostIP(hostIP)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersportswithhostip) + * [`fn withHostPort(hostPort)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersportswithhostport) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersportswithname) + * [`fn withProtocol(protocol)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersportswithprotocol) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobe) + * [`fn withFailureThreshold(failureThreshold)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobewithfailurethreshold) + * [`fn withInitialDelaySeconds(initialDelaySeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobewithinitialdelayseconds) + * [`fn withPeriodSeconds(periodSeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobewithperiodseconds) + * [`fn withSuccessThreshold(successThreshold)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobewithsuccessthreshold) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobewithterminationgraceperiodseconds) + * [`fn withTimeoutSeconds(timeoutSeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobewithtimeoutseconds) + * [`obj 
spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.exec`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobeexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobeexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobeexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.grpc`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobegrpc) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobegrpcwithport) + * [`fn withService(service)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobegrpcwithservice) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobehttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobetcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobetcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersreadinessprobetcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resizePolicy`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersresizepolicy) + * [`fn withResourceName(resourceName)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersresizepolicywithresourcename) + * [`fn withRestartPolicy(restartPolicy)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersresizepolicywithrestartpolicy) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersresources) + * [`fn withClaims(claims)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersresourceswithclaims) + * [`fn withClaimsMixin(claims)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersresourceswithclaimsmixin) + * [`fn 
withLimits(limits)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersresourceswithlimits) + * [`fn withLimitsMixin(limits)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersresourceswithlimitsmixin) + * [`fn withRequests(requests)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersresourceswithrequests) + * [`fn withRequestsMixin(requests)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersresourceswithrequestsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources.claims`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersresourcesclaims) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersresourcesclaimswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontext) + * [`fn withAllowPrivilegeEscalation(allowPrivilegeEscalation)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwithallowprivilegeescalation) + * [`fn withPrivileged(privileged)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwithprivileged) + * [`fn withProcMount(procMount)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwithprocmount) + * [`fn withReadOnlyRootFilesystem(readOnlyRootFilesystem)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwithreadonlyrootfilesystem) + * [`fn withRunAsGroup(runAsGroup)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwithrunasgroup) + * [`fn withRunAsNonRoot(runAsNonRoot)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwithrunasnonroot) + * [`fn withRunAsUser(runAsUser)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwithrunasuser) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.appArmorProfile`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextapparmorprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextapparmorprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextapparmorprofilewithtype) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.capabilities`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextcapabilities) + * [`fn withAdd(add)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextcapabilitieswithadd) + * [`fn withAddMixin(add)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextcapabilitieswithaddmixin) + * [`fn withDrop(drop)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextcapabilitieswithdrop) + * [`fn withDropMixin(drop)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextcapabilitieswithdropmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seLinuxOptions`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextselinuxoptions) + * [`fn withLevel(level)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextselinuxoptionswithlevel) + * [`fn 
withRole(role)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextselinuxoptionswithrole) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextselinuxoptionswithtype) + * [`fn withUser(user)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextselinuxoptionswithuser) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seccompProfile`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextseccompprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextseccompprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextseccompprofilewithtype) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.windowsOptions`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwindowsoptions) + * [`fn withGmsaCredentialSpec(gmsaCredentialSpec)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwindowsoptionswithgmsacredentialspec) + * [`fn withGmsaCredentialSpecName(gmsaCredentialSpecName)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwindowsoptionswithgmsacredentialspecname) + * [`fn withHostProcess(hostProcess)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwindowsoptionswithhostprocess) + * [`fn withRunAsUserName(runAsUserName)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainerssecuritycontextwindowsoptionswithrunasusername) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobe) + * [`fn withFailureThreshold(failureThreshold)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobewithfailurethreshold) + * [`fn withInitialDelaySeconds(initialDelaySeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobewithinitialdelayseconds) + * [`fn withPeriodSeconds(periodSeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobewithperiodseconds) + * [`fn withSuccessThreshold(successThreshold)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobewithsuccessthreshold) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobewithterminationgraceperiodseconds) + * [`fn withTimeoutSeconds(timeoutSeconds)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobewithtimeoutseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.exec`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobeexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobeexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobeexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.grpc`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobegrpc) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobegrpcwithport) + * [`fn 
withService(service)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobegrpcwithservice) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobehttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobetcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobetcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersstartupprobetcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeDevices`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersvolumedevices) + * [`fn withDevicePath(devicePath)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersvolumedeviceswithdevicepath) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersvolumedeviceswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeMounts`](#obj-specdeploymenttemplatespectemplatespecephemeralcontainersvolumemounts) + * [`fn withMountPath(mountPath)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersvolumemountswithmountpath) + * [`fn withMountPropagation(mountPropagation)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersvolumemountswithmountpropagation) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersvolumemountswithname) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersvolumemountswithreadonly) + * [`fn withRecursiveReadOnly(recursiveReadOnly)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersvolumemountswithrecursivereadonly) + * [`fn withSubPath(subPath)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersvolumemountswithsubpath) + * [`fn withSubPathExpr(subPathExpr)`](#fn-specdeploymenttemplatespectemplatespecephemeralcontainersvolumemountswithsubpathexpr) + * [`obj spec.deploymentTemplate.spec.template.spec.hostAliases`](#obj-specdeploymenttemplatespectemplatespechostaliases) + * [`fn 
withHostnames(hostnames)`](#fn-specdeploymenttemplatespectemplatespechostaliaseswithhostnames) + * [`fn withHostnamesMixin(hostnames)`](#fn-specdeploymenttemplatespectemplatespechostaliaseswithhostnamesmixin) + * [`fn withIp(ip)`](#fn-specdeploymenttemplatespectemplatespechostaliaseswithip) + * [`obj spec.deploymentTemplate.spec.template.spec.imagePullSecrets`](#obj-specdeploymenttemplatespectemplatespecimagepullsecrets) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecimagepullsecretswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers`](#obj-specdeploymenttemplatespectemplatespecinitcontainers) + * [`fn withArgs(args)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithargs) + * [`fn withArgsMixin(args)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithargsmixin) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithcommandmixin) + * [`fn withEnv(env)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithenv) + * [`fn withEnvFrom(envFrom)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithenvfrom) + * [`fn withEnvFromMixin(envFrom)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithenvfrommixin) + * [`fn withEnvMixin(env)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithenvmixin) + * [`fn withImage(image)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithimage) + * [`fn withImagePullPolicy(imagePullPolicy)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithimagepullpolicy) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithname) + * [`fn withPorts(ports)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithports) + * [`fn withPortsMixin(ports)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithportsmixin) + * [`fn withResizePolicy(resizePolicy)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithresizepolicy) + * [`fn withResizePolicyMixin(resizePolicy)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithresizepolicymixin) + * [`fn withRestartPolicy(restartPolicy)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithrestartpolicy) + * [`fn withStdin(stdin)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithstdin) + * [`fn withStdinOnce(stdinOnce)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithstdinonce) + * [`fn withTerminationMessagePath(terminationMessagePath)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithterminationmessagepath) + * [`fn withTerminationMessagePolicy(terminationMessagePolicy)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithterminationmessagepolicy) + * [`fn withTty(tty)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithtty) + * [`fn withVolumeDevices(volumeDevices)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithvolumedevices) + * [`fn withVolumeDevicesMixin(volumeDevices)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithvolumedevicesmixin) + * [`fn withVolumeMounts(volumeMounts)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithvolumemounts) + * [`fn withVolumeMountsMixin(volumeMounts)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithvolumemountsmixin) + * [`fn withWorkingDir(workingDir)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerswithworkingdir) + * [`obj 
spec.deploymentTemplate.spec.template.spec.initContainers.env`](#obj-specdeploymenttemplatespectemplatespecinitcontainersenv) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvwithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvwithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom`](#obj-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefrom) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.configMapKeyRef`](#obj-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromconfigmapkeyref) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromconfigmapkeyrefwithkey) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromconfigmapkeyrefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromconfigmapkeyrefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.fieldRef`](#obj-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromfieldref) + * [`fn withApiVersion(apiVersion)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromfieldrefwithapiversion) + * [`fn withFieldPath(fieldPath)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromfieldrefwithfieldpath) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.resourceFieldRef`](#obj-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromresourcefieldref) + * [`fn withContainerName(containerName)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromresourcefieldrefwithcontainername) + * [`fn withDivisor(divisor)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromresourcefieldrefwithdivisor) + * [`fn withResource(resource)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromresourcefieldrefwithresource) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.secretKeyRef`](#obj-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromsecretkeyref) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromsecretkeyrefwithkey) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromsecretkeyrefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvvaluefromsecretkeyrefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.envFrom`](#obj-specdeploymenttemplatespectemplatespecinitcontainersenvfrom) + * [`fn withPrefix(prefix)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvfromwithprefix) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.envFrom.configMapRef`](#obj-specdeploymenttemplatespectemplatespecinitcontainersenvfromconfigmapref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvfromconfigmaprefwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvfromconfigmaprefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.envFrom.secretRef`](#obj-specdeploymenttemplatespectemplatespecinitcontainersenvfromsecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvfromsecretrefwithname) + * [`fn 
withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersenvfromsecretrefwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecycle) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststart) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.exec`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststartexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststartexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststartexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarthttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.sleep`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststartsleep) + * [`fn withSeconds(seconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststartsleepwithseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarttcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarttcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecyclepoststarttcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestop) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.exec`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestopexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestopexecwithcommand) + * [`fn 
withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestopexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestophttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.sleep`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestopsleep) + * [`fn withSeconds(seconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestopsleepwithseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestoptcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestoptcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslifecycleprestoptcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobe) + * [`fn withFailureThreshold(failureThreshold)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobewithfailurethreshold) + * [`fn withInitialDelaySeconds(initialDelaySeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobewithinitialdelayseconds) + * [`fn withPeriodSeconds(periodSeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobewithperiodseconds) + * [`fn withSuccessThreshold(successThreshold)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobewithsuccessthreshold) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobewithterminationgraceperiodseconds) + * [`fn withTimeoutSeconds(timeoutSeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobewithtimeoutseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.exec`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobeexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobeexecwithcommand) + * [`fn 
withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobeexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.grpc`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobegrpc) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobegrpcwithport) + * [`fn withService(service)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobegrpcwithservice) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobehttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobetcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobetcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerslivenessprobetcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.ports`](#obj-specdeploymenttemplatespectemplatespecinitcontainersports) + * [`fn withContainerPort(containerPort)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersportswithcontainerport) + * [`fn withHostIP(hostIP)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersportswithhostip) + * [`fn withHostPort(hostPort)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersportswithhostport) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersportswithname) + * [`fn withProtocol(protocol)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersportswithprotocol) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe`](#obj-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobe) + * [`fn withFailureThreshold(failureThreshold)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobewithfailurethreshold) + * [`fn withInitialDelaySeconds(initialDelaySeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobewithinitialdelayseconds) + * [`fn withPeriodSeconds(periodSeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobewithperiodseconds) + * [`fn 
withSuccessThreshold(successThreshold)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobewithsuccessthreshold) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobewithterminationgraceperiodseconds) + * [`fn withTimeoutSeconds(timeoutSeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobewithtimeoutseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.exec`](#obj-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobeexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobeexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobeexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.grpc`](#obj-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobegrpc) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobegrpcwithport) + * [`fn withService(service)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobegrpcwithservice) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet`](#obj-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobehttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobetcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobetcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersreadinessprobetcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.resizePolicy`](#obj-specdeploymenttemplatespectemplatespecinitcontainersresizepolicy) + * [`fn withResourceName(resourceName)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersresizepolicywithresourcename) + * [`fn withRestartPolicy(restartPolicy)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersresizepolicywithrestartpolicy) + * [`obj 
spec.deploymentTemplate.spec.template.spec.initContainers.resources`](#obj-specdeploymenttemplatespectemplatespecinitcontainersresources) + * [`fn withClaims(claims)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersresourceswithclaims) + * [`fn withClaimsMixin(claims)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersresourceswithclaimsmixin) + * [`fn withLimits(limits)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersresourceswithlimits) + * [`fn withLimitsMixin(limits)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersresourceswithlimitsmixin) + * [`fn withRequests(requests)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersresourceswithrequests) + * [`fn withRequestsMixin(requests)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersresourceswithrequestsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.resources.claims`](#obj-specdeploymenttemplatespectemplatespecinitcontainersresourcesclaims) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersresourcesclaimswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext`](#obj-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontext) + * [`fn withAllowPrivilegeEscalation(allowPrivilegeEscalation)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwithallowprivilegeescalation) + * [`fn withPrivileged(privileged)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwithprivileged) + * [`fn withProcMount(procMount)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwithprocmount) + * [`fn withReadOnlyRootFilesystem(readOnlyRootFilesystem)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwithreadonlyrootfilesystem) + * [`fn withRunAsGroup(runAsGroup)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwithrunasgroup) + * [`fn withRunAsNonRoot(runAsNonRoot)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwithrunasnonroot) + * [`fn withRunAsUser(runAsUser)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwithrunasuser) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.appArmorProfile`](#obj-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextapparmorprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextapparmorprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextapparmorprofilewithtype) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.capabilities`](#obj-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextcapabilities) + * [`fn withAdd(add)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextcapabilitieswithadd) + * [`fn withAddMixin(add)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextcapabilitieswithaddmixin) + * [`fn withDrop(drop)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextcapabilitieswithdrop) + * [`fn withDropMixin(drop)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextcapabilitieswithdropmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seLinuxOptions`](#obj-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextselinuxoptions) + * [`fn 
withLevel(level)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextselinuxoptionswithlevel) + * [`fn withRole(role)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextselinuxoptionswithrole) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextselinuxoptionswithtype) + * [`fn withUser(user)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextselinuxoptionswithuser) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seccompProfile`](#obj-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextseccompprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextseccompprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextseccompprofilewithtype) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.windowsOptions`](#obj-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwindowsoptions) + * [`fn withGmsaCredentialSpec(gmsaCredentialSpec)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwindowsoptionswithgmsacredentialspec) + * [`fn withGmsaCredentialSpecName(gmsaCredentialSpecName)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwindowsoptionswithgmsacredentialspecname) + * [`fn withHostProcess(hostProcess)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwindowsoptionswithhostprocess) + * [`fn withRunAsUserName(runAsUserName)`](#fn-specdeploymenttemplatespectemplatespecinitcontainerssecuritycontextwindowsoptionswithrunasusername) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe`](#obj-specdeploymenttemplatespectemplatespecinitcontainersstartupprobe) + * [`fn withFailureThreshold(failureThreshold)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobewithfailurethreshold) + * [`fn withInitialDelaySeconds(initialDelaySeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobewithinitialdelayseconds) + * [`fn withPeriodSeconds(periodSeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobewithperiodseconds) + * [`fn withSuccessThreshold(successThreshold)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobewithsuccessthreshold) + * [`fn withTerminationGracePeriodSeconds(terminationGracePeriodSeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobewithterminationgraceperiodseconds) + * [`fn withTimeoutSeconds(timeoutSeconds)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobewithtimeoutseconds) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.exec`](#obj-specdeploymenttemplatespectemplatespecinitcontainersstartupprobeexec) + * [`fn withCommand(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobeexecwithcommand) + * [`fn withCommandMixin(command)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobeexecwithcommandmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.grpc`](#obj-specdeploymenttemplatespectemplatespecinitcontainersstartupprobegrpc) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobegrpcwithport) + * [`fn 
withService(service)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobegrpcwithservice) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet`](#obj-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpget) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpgetwithhost) + * [`fn withHttpHeaders(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpgetwithhttpheaders) + * [`fn withHttpHeadersMixin(httpHeaders)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpgetwithhttpheadersmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpgetwithpath) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpgetwithport) + * [`fn withScheme(scheme)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpgetwithscheme) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.httpHeaders`](#obj-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpgethttpheaders) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpgethttpheaderswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobehttpgethttpheaderswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.tcpSocket`](#obj-specdeploymenttemplatespectemplatespecinitcontainersstartupprobetcpsocket) + * [`fn withHost(host)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobetcpsocketwithhost) + * [`fn withPort(port)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersstartupprobetcpsocketwithport) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.volumeDevices`](#obj-specdeploymenttemplatespectemplatespecinitcontainersvolumedevices) + * [`fn withDevicePath(devicePath)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersvolumedeviceswithdevicepath) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersvolumedeviceswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.initContainers.volumeMounts`](#obj-specdeploymenttemplatespectemplatespecinitcontainersvolumemounts) + * [`fn withMountPath(mountPath)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersvolumemountswithmountpath) + * [`fn withMountPropagation(mountPropagation)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersvolumemountswithmountpropagation) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersvolumemountswithname) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersvolumemountswithreadonly) + * [`fn withRecursiveReadOnly(recursiveReadOnly)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersvolumemountswithrecursivereadonly) + * [`fn withSubPath(subPath)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersvolumemountswithsubpath) + * [`fn withSubPathExpr(subPathExpr)`](#fn-specdeploymenttemplatespectemplatespecinitcontainersvolumemountswithsubpathexpr) + * [`obj spec.deploymentTemplate.spec.template.spec.os`](#obj-specdeploymenttemplatespectemplatespecos) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecoswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.readinessGates`](#obj-specdeploymenttemplatespectemplatespecreadinessgates) + * 
[`fn withConditionType(conditionType)`](#fn-specdeploymenttemplatespectemplatespecreadinessgateswithconditiontype) + * [`obj spec.deploymentTemplate.spec.template.spec.resourceClaims`](#obj-specdeploymenttemplatespectemplatespecresourceclaims) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecresourceclaimswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.resourceClaims.source`](#obj-specdeploymenttemplatespectemplatespecresourceclaimssource) + * [`fn withResourceClaimName(resourceClaimName)`](#fn-specdeploymenttemplatespectemplatespecresourceclaimssourcewithresourceclaimname) + * [`fn withResourceClaimTemplateName(resourceClaimTemplateName)`](#fn-specdeploymenttemplatespectemplatespecresourceclaimssourcewithresourceclaimtemplatename) + * [`obj spec.deploymentTemplate.spec.template.spec.schedulingGates`](#obj-specdeploymenttemplatespectemplatespecschedulinggates) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecschedulinggateswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.securityContext`](#obj-specdeploymenttemplatespectemplatespecsecuritycontext) + * [`fn withFsGroup(fsGroup)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwithfsgroup) + * [`fn withFsGroupChangePolicy(fsGroupChangePolicy)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwithfsgroupchangepolicy) + * [`fn withRunAsGroup(runAsGroup)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwithrunasgroup) + * [`fn withRunAsNonRoot(runAsNonRoot)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwithrunasnonroot) + * [`fn withRunAsUser(runAsUser)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwithrunasuser) + * [`fn withSupplementalGroups(supplementalGroups)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwithsupplementalgroups) + * [`fn withSupplementalGroupsMixin(supplementalGroups)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwithsupplementalgroupsmixin) + * [`fn withSysctls(sysctls)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwithsysctls) + * [`fn withSysctlsMixin(sysctls)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwithsysctlsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.securityContext.appArmorProfile`](#obj-specdeploymenttemplatespectemplatespecsecuritycontextapparmorprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextapparmorprofilewithlocalhostprofile) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextapparmorprofilewithtype) + * [`obj spec.deploymentTemplate.spec.template.spec.securityContext.seLinuxOptions`](#obj-specdeploymenttemplatespectemplatespecsecuritycontextselinuxoptions) + * [`fn withLevel(level)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextselinuxoptionswithlevel) + * [`fn withRole(role)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextselinuxoptionswithrole) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextselinuxoptionswithtype) + * [`fn withUser(user)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextselinuxoptionswithuser) + * [`obj spec.deploymentTemplate.spec.template.spec.securityContext.seccompProfile`](#obj-specdeploymenttemplatespectemplatespecsecuritycontextseccompprofile) + * [`fn withLocalhostProfile(localhostProfile)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextseccompprofilewithlocalhostprofile) + * [`fn 
withType(type)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextseccompprofilewithtype) + * [`obj spec.deploymentTemplate.spec.template.spec.securityContext.sysctls`](#obj-specdeploymenttemplatespectemplatespecsecuritycontextsysctls) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextsysctlswithname) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextsysctlswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.securityContext.windowsOptions`](#obj-specdeploymenttemplatespectemplatespecsecuritycontextwindowsoptions) + * [`fn withGmsaCredentialSpec(gmsaCredentialSpec)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwindowsoptionswithgmsacredentialspec) + * [`fn withGmsaCredentialSpecName(gmsaCredentialSpecName)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwindowsoptionswithgmsacredentialspecname) + * [`fn withHostProcess(hostProcess)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwindowsoptionswithhostprocess) + * [`fn withRunAsUserName(runAsUserName)`](#fn-specdeploymenttemplatespectemplatespecsecuritycontextwindowsoptionswithrunasusername) + * [`obj spec.deploymentTemplate.spec.template.spec.tolerations`](#obj-specdeploymenttemplatespectemplatespectolerations) + * [`fn withEffect(effect)`](#fn-specdeploymenttemplatespectemplatespectolerationswitheffect) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespectolerationswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespectolerationswithoperator) + * [`fn withTolerationSeconds(tolerationSeconds)`](#fn-specdeploymenttemplatespectemplatespectolerationswithtolerationseconds) + * [`fn withValue(value)`](#fn-specdeploymenttemplatespectemplatespectolerationswithvalue) + * [`obj spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints`](#obj-specdeploymenttemplatespectemplatespectopologyspreadconstraints) + * [`fn withMatchLabelKeys(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintswithmatchlabelkeys) + * [`fn withMatchLabelKeysMixin(matchLabelKeys)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintswithmatchlabelkeysmixin) + * [`fn withMaxSkew(maxSkew)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintswithmaxskew) + * [`fn withMinDomains(minDomains)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintswithmindomains) + * [`fn withNodeAffinityPolicy(nodeAffinityPolicy)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintswithnodeaffinitypolicy) + * [`fn withNodeTaintsPolicy(nodeTaintsPolicy)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintswithnodetaintspolicy) + * [`fn withTopologyKey(topologyKey)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintswithtopologykey) + * [`fn withWhenUnsatisfiable(whenUnsatisfiable)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintswithwhenunsatisfiable) + * [`obj spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector`](#obj-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselectorwithmatchexpressionsmixin) + * [`fn 
withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespectopologyspreadconstraintslabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes`](#obj-specdeploymenttemplatespectemplatespecvolumes) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumeswithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.awsElasticBlockStore`](#obj-specdeploymenttemplatespectemplatespecvolumesawselasticblockstore) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesawselasticblockstorewithfstype) + * [`fn withPartition(partition)`](#fn-specdeploymenttemplatespectemplatespecvolumesawselasticblockstorewithpartition) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesawselasticblockstorewithreadonly) + * [`fn withVolumeID(volumeID)`](#fn-specdeploymenttemplatespectemplatespecvolumesawselasticblockstorewithvolumeid) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.azureDisk`](#obj-specdeploymenttemplatespectemplatespecvolumesazuredisk) + * [`fn withCachingMode(cachingMode)`](#fn-specdeploymenttemplatespectemplatespecvolumesazurediskwithcachingmode) + * [`fn withDiskName(diskName)`](#fn-specdeploymenttemplatespectemplatespecvolumesazurediskwithdiskname) + * [`fn withDiskURI(diskURI)`](#fn-specdeploymenttemplatespectemplatespecvolumesazurediskwithdiskuri) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesazurediskwithfstype) + * [`fn withKind(kind)`](#fn-specdeploymenttemplatespectemplatespecvolumesazurediskwithkind) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesazurediskwithreadonly) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.azureFile`](#obj-specdeploymenttemplatespectemplatespecvolumesazurefile) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesazurefilewithreadonly) + * [`fn withSecretName(secretName)`](#fn-specdeploymenttemplatespectemplatespecvolumesazurefilewithsecretname) + * [`fn withShareName(shareName)`](#fn-specdeploymenttemplatespectemplatespecvolumesazurefilewithsharename) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.cephfs`](#obj-specdeploymenttemplatespectemplatespecvolumescephfs) + * [`fn withMonitors(monitors)`](#fn-specdeploymenttemplatespectemplatespecvolumescephfswithmonitors) + * [`fn withMonitorsMixin(monitors)`](#fn-specdeploymenttemplatespectemplatespecvolumescephfswithmonitorsmixin) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumescephfswithpath) + * [`fn 
withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumescephfswithreadonly) + * [`fn withSecretFile(secretFile)`](#fn-specdeploymenttemplatespectemplatespecvolumescephfswithsecretfile) + * [`fn withUser(user)`](#fn-specdeploymenttemplatespectemplatespecvolumescephfswithuser) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.cephfs.secretRef`](#obj-specdeploymenttemplatespectemplatespecvolumescephfssecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumescephfssecretrefwithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.cinder`](#obj-specdeploymenttemplatespectemplatespecvolumescinder) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumescinderwithfstype) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumescinderwithreadonly) + * [`fn withVolumeID(volumeID)`](#fn-specdeploymenttemplatespectemplatespecvolumescinderwithvolumeid) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.cinder.secretRef`](#obj-specdeploymenttemplatespectemplatespecvolumescindersecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumescindersecretrefwithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.configMap`](#obj-specdeploymenttemplatespectemplatespecvolumesconfigmap) + * [`fn withDefaultMode(defaultMode)`](#fn-specdeploymenttemplatespectemplatespecvolumesconfigmapwithdefaultmode) + * [`fn withItems(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesconfigmapwithitems) + * [`fn withItemsMixin(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesconfigmapwithitemsmixin) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesconfigmapwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecvolumesconfigmapwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.configMap.items`](#obj-specdeploymenttemplatespectemplatespecvolumesconfigmapitems) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecvolumesconfigmapitemswithkey) + * [`fn withMode(mode)`](#fn-specdeploymenttemplatespectemplatespecvolumesconfigmapitemswithmode) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumesconfigmapitemswithpath) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.csi`](#obj-specdeploymenttemplatespectemplatespecvolumescsi) + * [`fn withDriver(driver)`](#fn-specdeploymenttemplatespectemplatespecvolumescsiwithdriver) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumescsiwithfstype) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumescsiwithreadonly) + * [`fn withVolumeAttributes(volumeAttributes)`](#fn-specdeploymenttemplatespectemplatespecvolumescsiwithvolumeattributes) + * [`fn withVolumeAttributesMixin(volumeAttributes)`](#fn-specdeploymenttemplatespectemplatespecvolumescsiwithvolumeattributesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.csi.nodePublishSecretRef`](#obj-specdeploymenttemplatespectemplatespecvolumescsinodepublishsecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumescsinodepublishsecretrefwithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI`](#obj-specdeploymenttemplatespectemplatespecvolumesdownwardapi) + * [`fn withDefaultMode(defaultMode)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiwithdefaultmode) + * [`fn 
withItems(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiwithitems) + * [`fn withItemsMixin(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiwithitemsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items`](#obj-specdeploymenttemplatespectemplatespecvolumesdownwardapiitems) + * [`fn withMode(mode)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiitemswithmode) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiitemswithpath) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.fieldRef`](#obj-specdeploymenttemplatespectemplatespecvolumesdownwardapiitemsfieldref) + * [`fn withApiVersion(apiVersion)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiitemsfieldrefwithapiversion) + * [`fn withFieldPath(fieldPath)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiitemsfieldrefwithfieldpath) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.resourceFieldRef`](#obj-specdeploymenttemplatespectemplatespecvolumesdownwardapiitemsresourcefieldref) + * [`fn withContainerName(containerName)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiitemsresourcefieldrefwithcontainername) + * [`fn withDivisor(divisor)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiitemsresourcefieldrefwithdivisor) + * [`fn withResource(resource)`](#fn-specdeploymenttemplatespectemplatespecvolumesdownwardapiitemsresourcefieldrefwithresource) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.emptyDir`](#obj-specdeploymenttemplatespectemplatespecvolumesemptydir) + * [`fn withMedium(medium)`](#fn-specdeploymenttemplatespectemplatespecvolumesemptydirwithmedium) + * [`fn withSizeLimit(sizeLimit)`](#fn-specdeploymenttemplatespectemplatespecvolumesemptydirwithsizelimit) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral`](#obj-specdeploymenttemplatespectemplatespecvolumesephemeral) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate`](#obj-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplate) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata`](#obj-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatemetadata) + * [`fn withAnnotations(annotations)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatemetadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatemetadatawithannotationsmixin) + * [`fn withFinalizers(finalizers)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatemetadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatemetadatawithfinalizersmixin) + * [`fn withLabels(labels)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatemetadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatemetadatawithlabelsmixin) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatemetadatawithname) + * [`fn withNamespace(namespace)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatemetadatawithnamespace) + * [`obj 
spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec`](#obj-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespec) + * [`fn withAccessModes(accessModes)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecwithaccessmodes) + * [`fn withAccessModesMixin(accessModes)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecwithaccessmodesmixin) + * [`fn withStorageClassName(storageClassName)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecwithstorageclassname) + * [`fn withVolumeAttributesClassName(volumeAttributesClassName)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecwithvolumeattributesclassname) + * [`fn withVolumeMode(volumeMode)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecwithvolumemode) + * [`fn withVolumeName(volumeName)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecwithvolumename) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource`](#obj-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecdatasource) + * [`fn withApiGroup(apiGroup)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecdatasourcewithapigroup) + * [`fn withKind(kind)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecdatasourcewithkind) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecdatasourcewithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef`](#obj-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecdatasourceref) + * [`fn withApiGroup(apiGroup)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecdatasourcerefwithapigroup) + * [`fn withKind(kind)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecdatasourcerefwithkind) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecdatasourcerefwithname) + * [`fn withNamespace(namespace)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecdatasourcerefwithnamespace) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.resources`](#obj-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecresources) + * [`fn withLimits(limits)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecresourceswithlimits) + * [`fn withLimitsMixin(limits)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecresourceswithlimitsmixin) + * [`fn withRequests(requests)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecresourceswithrequests) + * [`fn withRequestsMixin(requests)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecresourceswithrequestsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector`](#obj-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselectorwithmatchexpressions) + * [`fn 
withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselectorwithmatchlabelsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecvolumesephemeralvolumeclaimtemplatespecselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.fc`](#obj-specdeploymenttemplatespectemplatespecvolumesfc) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesfcwithfstype) + * [`fn withLun(lun)`](#fn-specdeploymenttemplatespectemplatespecvolumesfcwithlun) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesfcwithreadonly) + * [`fn withTargetWWNs(targetWWNs)`](#fn-specdeploymenttemplatespectemplatespecvolumesfcwithtargetwwns) + * [`fn withTargetWWNsMixin(targetWWNs)`](#fn-specdeploymenttemplatespectemplatespecvolumesfcwithtargetwwnsmixin) + * [`fn withWwids(wwids)`](#fn-specdeploymenttemplatespectemplatespecvolumesfcwithwwids) + * [`fn withWwidsMixin(wwids)`](#fn-specdeploymenttemplatespectemplatespecvolumesfcwithwwidsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.flexVolume`](#obj-specdeploymenttemplatespectemplatespecvolumesflexvolume) + * [`fn withDriver(driver)`](#fn-specdeploymenttemplatespectemplatespecvolumesflexvolumewithdriver) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesflexvolumewithfstype) + * [`fn withOptions(options)`](#fn-specdeploymenttemplatespectemplatespecvolumesflexvolumewithoptions) + * [`fn withOptionsMixin(options)`](#fn-specdeploymenttemplatespectemplatespecvolumesflexvolumewithoptionsmixin) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesflexvolumewithreadonly) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.flexVolume.secretRef`](#obj-specdeploymenttemplatespectemplatespecvolumesflexvolumesecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesflexvolumesecretrefwithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.flocker`](#obj-specdeploymenttemplatespectemplatespecvolumesflocker) + * [`fn withDatasetName(datasetName)`](#fn-specdeploymenttemplatespectemplatespecvolumesflockerwithdatasetname) + * [`fn withDatasetUUID(datasetUUID)`](#fn-specdeploymenttemplatespectemplatespecvolumesflockerwithdatasetuuid) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.gcePersistentDisk`](#obj-specdeploymenttemplatespectemplatespecvolumesgcepersistentdisk) + * [`fn 
withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesgcepersistentdiskwithfstype) + * [`fn withPartition(partition)`](#fn-specdeploymenttemplatespectemplatespecvolumesgcepersistentdiskwithpartition) + * [`fn withPdName(pdName)`](#fn-specdeploymenttemplatespectemplatespecvolumesgcepersistentdiskwithpdname) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesgcepersistentdiskwithreadonly) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.gitRepo`](#obj-specdeploymenttemplatespectemplatespecvolumesgitrepo) + * [`fn withDirectory(directory)`](#fn-specdeploymenttemplatespectemplatespecvolumesgitrepowithdirectory) + * [`fn withRepository(repository)`](#fn-specdeploymenttemplatespectemplatespecvolumesgitrepowithrepository) + * [`fn withRevision(revision)`](#fn-specdeploymenttemplatespectemplatespecvolumesgitrepowithrevision) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.glusterfs`](#obj-specdeploymenttemplatespectemplatespecvolumesglusterfs) + * [`fn withEndpoints(endpoints)`](#fn-specdeploymenttemplatespectemplatespecvolumesglusterfswithendpoints) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumesglusterfswithpath) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesglusterfswithreadonly) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.hostPath`](#obj-specdeploymenttemplatespectemplatespecvolumeshostpath) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumeshostpathwithpath) + * [`fn withType(type)`](#fn-specdeploymenttemplatespectemplatespecvolumeshostpathwithtype) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.iscsi`](#obj-specdeploymenttemplatespectemplatespecvolumesiscsi) + * [`fn withChapAuthDiscovery(chapAuthDiscovery)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithchapauthdiscovery) + * [`fn withChapAuthSession(chapAuthSession)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithchapauthsession) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithfstype) + * [`fn withInitiatorName(initiatorName)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithinitiatorname) + * [`fn withIqn(iqn)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithiqn) + * [`fn withIscsiInterface(iscsiInterface)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithiscsiinterface) + * [`fn withLun(lun)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithlun) + * [`fn withPortals(portals)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithportals) + * [`fn withPortalsMixin(portals)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithportalsmixin) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithreadonly) + * [`fn withTargetPortal(targetPortal)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsiwithtargetportal) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.iscsi.secretRef`](#obj-specdeploymenttemplatespectemplatespecvolumesiscsisecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesiscsisecretrefwithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.nfs`](#obj-specdeploymenttemplatespectemplatespecvolumesnfs) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumesnfswithpath) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesnfswithreadonly) + * [`fn 
withServer(server)`](#fn-specdeploymenttemplatespectemplatespecvolumesnfswithserver) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.persistentVolumeClaim`](#obj-specdeploymenttemplatespectemplatespecvolumespersistentvolumeclaim) + * [`fn withClaimName(claimName)`](#fn-specdeploymenttemplatespectemplatespecvolumespersistentvolumeclaimwithclaimname) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumespersistentvolumeclaimwithreadonly) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.photonPersistentDisk`](#obj-specdeploymenttemplatespectemplatespecvolumesphotonpersistentdisk) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesphotonpersistentdiskwithfstype) + * [`fn withPdID(pdID)`](#fn-specdeploymenttemplatespectemplatespecvolumesphotonpersistentdiskwithpdid) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.portworxVolume`](#obj-specdeploymenttemplatespectemplatespecvolumesportworxvolume) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesportworxvolumewithfstype) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesportworxvolumewithreadonly) + * [`fn withVolumeID(volumeID)`](#fn-specdeploymenttemplatespectemplatespecvolumesportworxvolumewithvolumeid) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected`](#obj-specdeploymenttemplatespectemplatespecvolumesprojected) + * [`fn withDefaultMode(defaultMode)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedwithdefaultmode) + * [`fn withSources(sources)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedwithsources) + * [`fn withSourcesMixin(sources)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedwithsourcesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsources) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundle) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlewithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlewithoptional) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlewithpath) + * [`fn withSignerName(signerName)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlewithsignername) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselector) + * [`fn withMatchExpressions(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselectorwithmatchexpressions) + * [`fn withMatchExpressionsMixin(matchExpressions)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselectorwithmatchexpressionsmixin) + * [`fn withMatchLabels(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselectorwithmatchlabels) + * [`fn withMatchLabelsMixin(matchLabels)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselectorwithmatchlabelsmixin) + * [`obj 
spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressions) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressionswithkey) + * [`fn withOperator(operator)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressionswithoperator) + * [`fn withValues(values)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressionswithvalues) + * [`fn withValuesMixin(values)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesclustertrustbundlelabelselectormatchexpressionswithvaluesmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesconfigmap) + * [`fn withItems(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesconfigmapwithitems) + * [`fn withItemsMixin(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesconfigmapwithitemsmixin) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesconfigmapwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesconfigmapwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap.items`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesconfigmapitems) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesconfigmapitemswithkey) + * [`fn withMode(mode)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesconfigmapitemswithmode) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesconfigmapitemswithpath) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapi) + * [`fn withItems(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiwithitems) + * [`fn withItemsMixin(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiwithitemsmixin) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitems) + * [`fn withMode(mode)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitemswithmode) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitemswithpath) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.fieldRef`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitemsfieldref) + * [`fn withApiVersion(apiVersion)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitemsfieldrefwithapiversion) + * [`fn withFieldPath(fieldPath)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitemsfieldrefwithfieldpath) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitemsresourcefieldref) + * [`fn 
withContainerName(containerName)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitemsresourcefieldrefwithcontainername) + * [`fn withDivisor(divisor)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitemsresourcefieldrefwithdivisor) + * [`fn withResource(resource)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesdownwardapiitemsresourcefieldrefwithresource) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcessecret) + * [`fn withItems(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcessecretwithitems) + * [`fn withItemsMixin(items)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcessecretwithitemsmixin) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcessecretwithname) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcessecretwithoptional) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret.items`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcessecretitems) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcessecretitemswithkey) + * [`fn withMode(mode)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcessecretitemswithmode) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcessecretitemswithpath) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.serviceAccountToken`](#obj-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesserviceaccounttoken) + * [`fn withAudience(audience)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesserviceaccounttokenwithaudience) + * [`fn withExpirationSeconds(expirationSeconds)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesserviceaccounttokenwithexpirationseconds) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumesprojectedsourcesserviceaccounttokenwithpath) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.quobyte`](#obj-specdeploymenttemplatespectemplatespecvolumesquobyte) + * [`fn withGroup(group)`](#fn-specdeploymenttemplatespectemplatespecvolumesquobytewithgroup) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesquobytewithreadonly) + * [`fn withRegistry(registry)`](#fn-specdeploymenttemplatespectemplatespecvolumesquobytewithregistry) + * [`fn withTenant(tenant)`](#fn-specdeploymenttemplatespectemplatespecvolumesquobytewithtenant) + * [`fn withUser(user)`](#fn-specdeploymenttemplatespectemplatespecvolumesquobytewithuser) + * [`fn withVolume(volume)`](#fn-specdeploymenttemplatespectemplatespecvolumesquobytewithvolume) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.rbd`](#obj-specdeploymenttemplatespectemplatespecvolumesrbd) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesrbdwithfstype) + * [`fn withImage(image)`](#fn-specdeploymenttemplatespectemplatespecvolumesrbdwithimage) + * [`fn withKeyring(keyring)`](#fn-specdeploymenttemplatespectemplatespecvolumesrbdwithkeyring) + * [`fn withMonitors(monitors)`](#fn-specdeploymenttemplatespectemplatespecvolumesrbdwithmonitors) + * [`fn withMonitorsMixin(monitors)`](#fn-specdeploymenttemplatespectemplatespecvolumesrbdwithmonitorsmixin) + * [`fn 
withPool(pool)`](#fn-specdeploymenttemplatespectemplatespecvolumesrbdwithpool) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesrbdwithreadonly) + * [`fn withUser(user)`](#fn-specdeploymenttemplatespectemplatespecvolumesrbdwithuser) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.rbd.secretRef`](#obj-specdeploymenttemplatespectemplatespecvolumesrbdsecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesrbdsecretrefwithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.scaleIO`](#obj-specdeploymenttemplatespectemplatespecvolumesscaleio) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiowithfstype) + * [`fn withGateway(gateway)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiowithgateway) + * [`fn withProtectionDomain(protectionDomain)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiowithprotectiondomain) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiowithreadonly) + * [`fn withSslEnabled(sslEnabled)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiowithsslenabled) + * [`fn withStorageMode(storageMode)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiowithstoragemode) + * [`fn withStoragePool(storagePool)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiowithstoragepool) + * [`fn withSystem(system)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiowithsystem) + * [`fn withVolumeName(volumeName)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiowithvolumename) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.secretRef`](#obj-specdeploymenttemplatespectemplatespecvolumesscaleiosecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesscaleiosecretrefwithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.secret`](#obj-specdeploymenttemplatespectemplatespecvolumessecret) + * [`fn withDefaultMode(defaultMode)`](#fn-specdeploymenttemplatespectemplatespecvolumessecretwithdefaultmode) + * [`fn withItems(items)`](#fn-specdeploymenttemplatespectemplatespecvolumessecretwithitems) + * [`fn withItemsMixin(items)`](#fn-specdeploymenttemplatespectemplatespecvolumessecretwithitemsmixin) + * [`fn withOptional(optional)`](#fn-specdeploymenttemplatespectemplatespecvolumessecretwithoptional) + * [`fn withSecretName(secretName)`](#fn-specdeploymenttemplatespectemplatespecvolumessecretwithsecretname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.secret.items`](#obj-specdeploymenttemplatespectemplatespecvolumessecretitems) + * [`fn withKey(key)`](#fn-specdeploymenttemplatespectemplatespecvolumessecretitemswithkey) + * [`fn withMode(mode)`](#fn-specdeploymenttemplatespectemplatespecvolumessecretitemswithmode) + * [`fn withPath(path)`](#fn-specdeploymenttemplatespectemplatespecvolumessecretitemswithpath) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.storageos`](#obj-specdeploymenttemplatespectemplatespecvolumesstorageos) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesstorageoswithfstype) + * [`fn withReadOnly(readOnly)`](#fn-specdeploymenttemplatespectemplatespecvolumesstorageoswithreadonly) + * [`fn withVolumeName(volumeName)`](#fn-specdeploymenttemplatespectemplatespecvolumesstorageoswithvolumename) + * [`fn withVolumeNamespace(volumeNamespace)`](#fn-specdeploymenttemplatespectemplatespecvolumesstorageoswithvolumenamespace) + * [`obj 
spec.deploymentTemplate.spec.template.spec.volumes.storageos.secretRef`](#obj-specdeploymenttemplatespectemplatespecvolumesstorageossecretref) + * [`fn withName(name)`](#fn-specdeploymenttemplatespectemplatespecvolumesstorageossecretrefwithname) + * [`obj spec.deploymentTemplate.spec.template.spec.volumes.vsphereVolume`](#obj-specdeploymenttemplatespectemplatespecvolumesvspherevolume) + * [`fn withFsType(fsType)`](#fn-specdeploymenttemplatespectemplatespecvolumesvspherevolumewithfstype) + * [`fn withStoragePolicyID(storagePolicyID)`](#fn-specdeploymenttemplatespectemplatespecvolumesvspherevolumewithstoragepolicyid) + * [`fn withStoragePolicyName(storagePolicyName)`](#fn-specdeploymenttemplatespectemplatespecvolumesvspherevolumewithstoragepolicyname) + * [`fn withVolumePath(volumePath)`](#fn-specdeploymenttemplatespectemplatespecvolumesvspherevolumewithvolumepath) + * [`obj spec.serviceAccountTemplate`](#obj-specserviceaccounttemplate) + * [`obj spec.serviceAccountTemplate.metadata`](#obj-specserviceaccounttemplatemetadata) + * [`fn withAnnotations(annotations)`](#fn-specserviceaccounttemplatemetadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-specserviceaccounttemplatemetadatawithannotationsmixin) + * [`fn withLabels(labels)`](#fn-specserviceaccounttemplatemetadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-specserviceaccounttemplatemetadatawithlabelsmixin) + * [`fn withName(name)`](#fn-specserviceaccounttemplatemetadatawithname) + * [`obj spec.serviceTemplate`](#obj-specservicetemplate) + * [`obj spec.serviceTemplate.metadata`](#obj-specservicetemplatemetadata) + * [`fn withAnnotations(annotations)`](#fn-specservicetemplatemetadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-specservicetemplatemetadatawithannotationsmixin) + * [`fn withLabels(labels)`](#fn-specservicetemplatemetadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-specservicetemplatemetadatawithlabelsmixin) + * [`fn withName(name)`](#fn-specservicetemplatemetadatawithname) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of DeploymentRuntimeConfig + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." 
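+
+As a usage illustration (not part of the generated field reference), the constructor and the `metadata` helpers compose by merging the partial objects they return, assuming the library follows the usual docsonnet conventions. The import path and the `pkg.v1beta1.deploymentRuntimeConfig` location below are assumptions made for this sketch, not taken from this document:
+
+```jsonnet
+// Minimal sketch; the import path and package location are illustrative.
+local drc = (import 'crossplane/main.libsonnet').pkg.v1beta1.deploymentRuntimeConfig;
+
+drc.new('my-runtime-config') +
+drc.metadata.withAnnotations({ 'example.com/owner': 'platform-team' }) +
+// The Mixin variant appends to whatever annotations are already set above.
+drc.metadata.withAnnotationsMixin({ 'example.com/notes': 'managed-by-jsonnet' })
+```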
+ +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"DeploymentRuntimeConfigSpec specifies the configuration for a packaged controller.\nValues provided will override package manager defaults. Labels and\nannotations are passed to both the controller Deployment and ServiceAccount." + +## obj spec.deploymentTemplate + +"DeploymentTemplate is the template for the Deployment object." + +## obj spec.deploymentTemplate.metadata + +"Metadata contains the configurable metadata fields for the Deployment." + +### fn spec.deploymentTemplate.metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that\nmay be set by external tools to store and retrieve arbitrary metadata.\nThey are not queryable and should be preserved when modifying objects.\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/" + +### fn spec.deploymentTemplate.metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that\nmay be set by external tools to store and retrieve arbitrary metadata.\nThey are not queryable and should be preserved when modifying objects.\nMore info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. Labels will be merged with internal labels\nused by crossplane, and labels with a crossplane.io key might be\noverwritten.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.deploymentTemplate.metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. Labels will be merged with internal labels\nused by crossplane, and labels with a crossplane.io key might be\noverwritten.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.metadata.withName + +```ts +withName(name) +``` + +"Name is the name of the object." 
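+
+As a hedged sketch (same assumed import and package location as above), the `spec.deploymentTemplate.metadata` helpers shape the metadata of the controller Deployment itself; per the field descriptions, labels set here are merged with crossplane's internal labels:
+
+```jsonnet
+// Sketch only; the package location is an assumption, the with* helpers are the ones documented here.
+local drc = (import 'crossplane/main.libsonnet').pkg.v1beta1.deploymentRuntimeConfig;
+
+drc.new('my-runtime-config') +
+drc.spec.deploymentTemplate.metadata.withName('provider-runtime') +
+drc.spec.deploymentTemplate.metadata.withLabels({ 'example.com/tier': 'runtime' }) +
+drc.spec.deploymentTemplate.metadata.withAnnotationsMixin({ 'example.com/scrape': 'true' })
+```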
+ +## obj spec.deploymentTemplate.spec + +"Spec contains the configurable spec fields for the Deployment object." + +### fn spec.deploymentTemplate.spec.withMinReadySeconds + +```ts +withMinReadySeconds(minReadySeconds) +``` + +"Minimum number of seconds for which a newly created pod should be ready\nwithout any of its container crashing, for it to be considered available.\nDefaults to 0 (pod will be considered available as soon as it is ready)" + +### fn spec.deploymentTemplate.spec.withPaused + +```ts +withPaused(paused) +``` + +"Indicates that the deployment is paused." + +### fn spec.deploymentTemplate.spec.withProgressDeadlineSeconds + +```ts +withProgressDeadlineSeconds(progressDeadlineSeconds) +``` + +"The maximum time in seconds for a deployment to make progress before it\nis considered to be failed. The deployment controller will continue to\nprocess failed deployments and a condition with a ProgressDeadlineExceeded\nreason will be surfaced in the deployment status. Note that progress will\nnot be estimated during the time a deployment is paused. Defaults to 600s." + +### fn spec.deploymentTemplate.spec.withReplicas + +```ts +withReplicas(replicas) +``` + +"Number of desired pods. This is a pointer to distinguish between explicit\nzero and not specified. Defaults to 1." + +### fn spec.deploymentTemplate.spec.withRevisionHistoryLimit + +```ts +withRevisionHistoryLimit(revisionHistoryLimit) +``` + +"The number of old ReplicaSets to retain to allow rollback.\nThis is a pointer to distinguish between explicit zero and not specified.\nDefaults to 10." + +## obj spec.deploymentTemplate.spec.selector + +"Label selector for pods. Existing ReplicaSets whose pods are\nselected by this will be the ones affected by this deployment.\nIt must match the pod template's labels." + +### fn spec.deploymentTemplate.spec.selector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.selector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.selector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.selector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.selector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.selector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." 
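+
+A sketch of the Deployment-level spec helpers documented above (import path illustrative); note that, as the selector description says, the selector must match the pod template's labels:
+
+```jsonnet
+local drc = (import 'crossplane/main.libsonnet').pkg.v1beta1.deploymentRuntimeConfig;  // illustrative path
+
+drc.new('my-runtime-config') +
+drc.spec.deploymentTemplate.spec.withReplicas(2) +
+drc.spec.deploymentTemplate.spec.withRevisionHistoryLimit(3) +
+drc.spec.deploymentTemplate.spec.selector.withMatchLabels({ 'example.com/app': 'provider-runtime' })
+```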
+ +### fn spec.deploymentTemplate.spec.selector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.selector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.selector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.strategy + +"The deployment strategy to use to replace existing pods with new ones." + +### fn spec.deploymentTemplate.spec.strategy.withType + +```ts +withType(type) +``` + +"Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate." + +## obj spec.deploymentTemplate.spec.strategy.rollingUpdate + +"Rolling update config params. Present only if DeploymentStrategyType =\nRollingUpdate.\n---\nTODO: Update this to follow our convention for oneOf, whatever we decide it\nto be." + +### fn spec.deploymentTemplate.spec.strategy.rollingUpdate.withMaxSurge + +```ts +withMaxSurge(maxSurge) +``` + +"The maximum number of pods that can be scheduled above the desired number of\npods.\nValue can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).\nThis can not be 0 if MaxUnavailable is 0.\nAbsolute number is calculated from percentage by rounding up.\nDefaults to 25%.\nExample: when this is set to 30%, the new ReplicaSet can be scaled up immediately when\nthe rolling update starts, such that the total number of old and new pods do not exceed\n130% of desired pods. Once old pods have been killed,\nnew ReplicaSet can be scaled up further, ensuring that total number of pods running\nat any time during the update is at most 130% of desired pods." + +### fn spec.deploymentTemplate.spec.strategy.rollingUpdate.withMaxUnavailable + +```ts +withMaxUnavailable(maxUnavailable) +``` + +"The maximum number of pods that can be unavailable during the update.\nValue can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).\nAbsolute number is calculated from percentage by rounding down.\nThis can not be 0 if MaxSurge is 0.\nDefaults to 25%.\nExample: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods\nimmediately when the rolling update starts. Once new pods are ready, old ReplicaSet\ncan be scaled down further, followed by scaling up the new ReplicaSet, ensuring\nthat the total number of pods available at all times during the update is at\nleast 70% of desired pods." + +## obj spec.deploymentTemplate.spec.template + +"Template describes the pods that will be created.\nThe only allowed template.spec.restartPolicy value is \"Always\"." 
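+
+A sketch combining a label selector requirement with the rolling-update strategy fields documented above. The assumption that the `matchExpressions` helpers can be merged into a plain list entry follows the usual docsonnet pattern and is not stated in this document:
+
+```jsonnet
+local drc = (import 'crossplane/main.libsonnet').pkg.v1beta1.deploymentRuntimeConfig;  // illustrative path
+local expr = drc.spec.deploymentTemplate.spec.selector.matchExpressions;
+
+drc.new('my-runtime-config') +
+drc.spec.deploymentTemplate.spec.selector.withMatchExpressions([
+  expr.withKey('example.com/app') + expr.withOperator('In') + expr.withValues(['provider-runtime']),
+]) +
+drc.spec.deploymentTemplate.spec.strategy.withType('RollingUpdate') +
+drc.spec.deploymentTemplate.spec.strategy.rollingUpdate.withMaxSurge('25%') +
+drc.spec.deploymentTemplate.spec.strategy.rollingUpdate.withMaxUnavailable(0)
+```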
+ +## obj spec.deploymentTemplate.spec.template.metadata + +"Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + +### fn spec.deploymentTemplate.spec.template.metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + + + +### fn spec.deploymentTemplate.spec.template.metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + + + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + + + +### fn spec.deploymentTemplate.spec.template.metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + + + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.metadata.withLabels + +```ts +withLabels(labels) +``` + + + +### fn spec.deploymentTemplate.spec.template.metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + + + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.metadata.withName + +```ts +withName(name) +``` + + + +### fn spec.deploymentTemplate.spec.template.metadata.withNamespace + +```ts +withNamespace(namespace) +``` + + + +## obj spec.deploymentTemplate.spec.template.spec + +"Specification of the desired behavior of the pod.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + +### fn spec.deploymentTemplate.spec.template.spec.withActiveDeadlineSeconds + +```ts +withActiveDeadlineSeconds(activeDeadlineSeconds) +``` + +"Optional duration in seconds the pod may be active on the node relative to\nStartTime before the system will actively try to mark it failed and kill associated containers.\nValue must be a positive integer." + +### fn spec.deploymentTemplate.spec.template.spec.withAutomountServiceAccountToken + +```ts +withAutomountServiceAccountToken(automountServiceAccountToken) +``` + +"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted." + +### fn spec.deploymentTemplate.spec.template.spec.withContainers + +```ts +withContainers(containers) +``` + +"List of containers belonging to the pod.\nContainers cannot currently be added or removed.\nThere must be at least one container in a Pod.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.withContainersMixin + +```ts +withContainersMixin(containers) +``` + +"List of containers belonging to the pod.\nContainers cannot currently be added or removed.\nThere must be at least one container in a Pod.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withDnsPolicy + +```ts +withDnsPolicy(dnsPolicy) +``` + +"Set DNS policy for the pod.\nDefaults to \"ClusterFirst\".\nValid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.\nDNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.\nTo have DNS options set along with hostNetwork, you have to specify DNS policy\nexplicitly to 'ClusterFirstWithHostNet'." 
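+
+A sketch of the pod-template spec helpers documented above, passing containers as plain objects for brevity (import path illustrative, image name a placeholder):
+
+```jsonnet
+local drc = (import 'crossplane/main.libsonnet').pkg.v1beta1.deploymentRuntimeConfig;  // illustrative path
+local podSpec = drc.spec.deploymentTemplate.spec.template.spec;
+
+drc.new('my-runtime-config') +
+podSpec.withAutomountServiceAccountToken(true) +
+podSpec.withContainers([
+  // Container given as a plain object; name and image are placeholders.
+  { name: 'package-runtime', image: 'example.registry/provider-runtime:v0.1.0' },
+])
+```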
+ +### fn spec.deploymentTemplate.spec.template.spec.withEnableServiceLinks + +```ts +withEnableServiceLinks(enableServiceLinks) +``` + +"EnableServiceLinks indicates whether information about services should be injected into pod's\nenvironment variables, matching the syntax of Docker links.\nOptional: Defaults to true." + +### fn spec.deploymentTemplate.spec.template.spec.withEphemeralContainers + +```ts +withEphemeralContainers(ephemeralContainers) +``` + +"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\npod to perform user-initiated actions such as debugging. This list cannot be specified when\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource." + +### fn spec.deploymentTemplate.spec.template.spec.withEphemeralContainersMixin + +```ts +withEphemeralContainersMixin(ephemeralContainers) +``` + +"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\npod to perform user-initiated actions such as debugging. This list cannot be specified when\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withHostAliases + +```ts +withHostAliases(hostAliases) +``` + +"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts\nfile if specified." + +### fn spec.deploymentTemplate.spec.template.spec.withHostAliasesMixin + +```ts +withHostAliasesMixin(hostAliases) +``` + +"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts\nfile if specified." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withHostIPC + +```ts +withHostIPC(hostIPC) +``` + +"Use the host's ipc namespace.\nOptional: Default to false." + +### fn spec.deploymentTemplate.spec.template.spec.withHostNetwork + +```ts +withHostNetwork(hostNetwork) +``` + +"Host networking requested for this pod. Use the host's network namespace.\nIf this option is set, the ports that will be used must be specified.\nDefault to false." + +### fn spec.deploymentTemplate.spec.template.spec.withHostPID + +```ts +withHostPID(hostPID) +``` + +"Use the host's pid namespace.\nOptional: Default to false." + +### fn spec.deploymentTemplate.spec.template.spec.withHostUsers + +```ts +withHostUsers(hostUsers) +``` + +"Use the host's user namespace.\nOptional: Default to true.\nIf set to true or not present, the pod will be run in the host user namespace, useful\nfor when the pod needs a feature only available to the host user namespace, such as\nloading a kernel module with CAP_SYS_MODULE.\nWhen set to false, a new userns is created for the pod. Setting false is useful for\nmitigating container breakout vulnerabilities even allowing users to run their\ncontainers as root without actually having root privileges on the host.\nThis field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature." + +### fn spec.deploymentTemplate.spec.template.spec.withHostname + +```ts +withHostname(hostname) +``` + +"Specifies the hostname of the Pod\nIf not specified, the pod's hostname will be set to a system-defined value." 
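+
+A sketch pairing host networking with the DNS policy that the `withDnsPolicy` description requires in that case (import path illustrative):
+
+```jsonnet
+local drc = (import 'crossplane/main.libsonnet').pkg.v1beta1.deploymentRuntimeConfig;  // illustrative path
+local podSpec = drc.spec.deploymentTemplate.spec.template.spec;
+
+drc.new('my-runtime-config') +
+podSpec.withHostNetwork(true) +
+podSpec.withDnsPolicy('ClusterFirstWithHostNet') +
+// The Mixin variant appends to any host aliases already present.
+podSpec.withHostAliasesMixin([{ ip: '10.0.0.10', hostnames: ['registry.internal'] }])
+```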
+ +### fn spec.deploymentTemplate.spec.template.spec.withImagePullSecrets + +```ts +withImagePullSecrets(imagePullSecrets) +``` + +"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod" + +### fn spec.deploymentTemplate.spec.template.spec.withImagePullSecretsMixin + +```ts +withImagePullSecretsMixin(imagePullSecrets) +``` + +"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withInitContainers + +```ts +withInitContainers(initContainers) +``` + +"List of initialization containers belonging to the pod.\nInit containers are executed in order prior to containers being started. If any\ninit container fails, the pod is considered to have failed and is handled according\nto its restartPolicy. The name for an init container or normal container must be\nunique among all containers.\nInit containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.\nThe resourceRequirements of an init container are taken into account during scheduling\nby finding the highest request/limit for each resource type, and then using the max of\nof that value or the sum of the normal containers. Limits are applied to init containers\nin a similar fashion.\nInit containers cannot currently be added or removed.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.withInitContainersMixin + +```ts +withInitContainersMixin(initContainers) +``` + +"List of initialization containers belonging to the pod.\nInit containers are executed in order prior to containers being started. If any\ninit container fails, the pod is considered to have failed and is handled according\nto its restartPolicy. The name for an init container or normal container must be\nunique among all containers.\nInit containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.\nThe resourceRequirements of an init container are taken into account during scheduling\nby finding the highest request/limit for each resource type, and then using the max of\nof that value or the sum of the normal containers. Limits are applied to init containers\nin a similar fashion.\nInit containers cannot currently be added or removed.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withNodeName + +```ts +withNodeName(nodeName) +``` + +"NodeName is a request to schedule this pod onto a specific node. If it is non-empty,\nthe scheduler simply schedules this pod onto that node, assuming that it fits resource\nrequirements." 
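+
+A sketch of the image-pull and init-container helpers documented above; the secret and image names are placeholders (import path illustrative):
+
+```jsonnet
+local drc = (import 'crossplane/main.libsonnet').pkg.v1beta1.deploymentRuntimeConfig;  // illustrative path
+local podSpec = drc.spec.deploymentTemplate.spec.template.spec;
+
+drc.new('my-runtime-config') +
+podSpec.withImagePullSecrets([{ name: 'registry-credentials' }]) +
+podSpec.withInitContainersMixin([
+  { name: 'prepare-cache', image: 'example.registry/busybox:stable', command: ['sh', '-c', 'mkdir -p /cache'] },
+])
+```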
+ +### fn spec.deploymentTemplate.spec.template.spec.withNodeSelector + +```ts +withNodeSelector(nodeSelector) +``` + +"NodeSelector is a selector which must be true for the pod to fit on a node.\nSelector which must match a node's labels for the pod to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + +### fn spec.deploymentTemplate.spec.template.spec.withNodeSelectorMixin + +```ts +withNodeSelectorMixin(nodeSelector) +``` + +"NodeSelector is a selector which must be true for the pod to fit on a node.\nSelector which must match a node's labels for the pod to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withOverhead + +```ts +withOverhead(overhead) +``` + +"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.\nThis field will be autopopulated at admission time by the RuntimeClass admission controller. If\nthe RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.\nThe RuntimeClass admission controller will reject Pod create requests which have the overhead already\nset. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value\ndefined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md" + +### fn spec.deploymentTemplate.spec.template.spec.withOverheadMixin + +```ts +withOverheadMixin(overhead) +``` + +"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.\nThis field will be autopopulated at admission time by the RuntimeClass admission controller. If\nthe RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.\nThe RuntimeClass admission controller will reject Pod create requests which have the overhead already\nset. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value\ndefined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withPreemptionPolicy + +```ts +withPreemptionPolicy(preemptionPolicy) +``` + +"PreemptionPolicy is the Policy for preempting pods with lower priority.\nOne of Never, PreemptLowerPriority.\nDefaults to PreemptLowerPriority if unset." + +### fn spec.deploymentTemplate.spec.template.spec.withPriority + +```ts +withPriority(priority) +``` + +"The priority value. Various system components use this field to find the\npriority of the pod. When Priority Admission Controller is enabled, it\nprevents users from setting this field. The admission controller populates\nthis field from PriorityClassName.\nThe higher the value, the higher the priority." + +### fn spec.deploymentTemplate.spec.template.spec.withPriorityClassName + +```ts +withPriorityClassName(priorityClassName) +``` + +"If specified, indicates the pod's priority. \"system-node-critical\" and\n\"system-cluster-critical\" are two special keywords which indicate the\nhighest priorities with the former being the highest priority. 
Any other\nname must be defined by creating a PriorityClass object with that name.\nIf not specified, the pod priority will be default or zero if there is no\ndefault." + +### fn spec.deploymentTemplate.spec.template.spec.withReadinessGates + +```ts +withReadinessGates(readinessGates) +``` + +"If specified, all readiness gates will be evaluated for pod readiness.\nA pod is ready when all its containers are ready AND\nall conditions specified in the readiness gates have status equal to \"True\"\nMore info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates" + +### fn spec.deploymentTemplate.spec.template.spec.withReadinessGatesMixin + +```ts +withReadinessGatesMixin(readinessGates) +``` + +"If specified, all readiness gates will be evaluated for pod readiness.\nA pod is ready when all its containers are ready AND\nall conditions specified in the readiness gates have status equal to \"True\"\nMore info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withResourceClaims + +```ts +withResourceClaims(resourceClaims) +``` + +"ResourceClaims defines which ResourceClaims must be allocated\nand reserved before the Pod is allowed to start. The resources\nwill be made available to those containers which consume them\nby name.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable." + +### fn spec.deploymentTemplate.spec.template.spec.withResourceClaimsMixin + +```ts +withResourceClaimsMixin(resourceClaims) +``` + +"ResourceClaims defines which ResourceClaims must be allocated\nand reserved before the Pod is allowed to start. The resources\nwill be made available to those containers which consume them\nby name.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withRestartPolicy + +```ts +withRestartPolicy(restartPolicy) +``` + +"Restart policy for all containers within the pod.\nOne of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.\nDefault to Always.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy" + +### fn spec.deploymentTemplate.spec.template.spec.withRuntimeClassName + +```ts +withRuntimeClassName(runtimeClassName) +``` + +"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\nto run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.\nIf unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an\nempty definition that uses the default runtime handler.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class" + +### fn spec.deploymentTemplate.spec.template.spec.withSchedulerName + +```ts +withSchedulerName(schedulerName) +``` + +"If specified, the pod will be dispatched by specified scheduler.\nIf not specified, the pod will be dispatched by default scheduler." 
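+
+A short sketch combining a few of the scheduling-related setters above; the priority class is one of the special keywords mentioned under withPriorityClassName, and the RuntimeClass name is illustrative (placeholder import as before):
+
+```jsonnet
+local lib = import 'example/main.libsonnet';  // placeholder import path
+local podSpec = lib.spec.deploymentTemplate.spec.template.spec;
+
+podSpec.withPriorityClassName('system-cluster-critical')
++ podSpec.withRestartPolicy('Always')
++ podSpec.withRuntimeClassName('gvisor')  // illustrative RuntimeClass name
+```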
+ +### fn spec.deploymentTemplate.spec.template.spec.withSchedulingGates + +```ts +withSchedulingGates(schedulingGates) +``` + +"SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\nIf schedulingGates is not empty, the pod will stay in the SchedulingGated state and the\nscheduler will not attempt to schedule the pod.\n\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards." + +### fn spec.deploymentTemplate.spec.template.spec.withSchedulingGatesMixin + +```ts +withSchedulingGatesMixin(schedulingGates) +``` + +"SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\nIf schedulingGates is not empty, the pod will stay in the SchedulingGated state and the\nscheduler will not attempt to schedule the pod.\n\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withServiceAccount + +```ts +withServiceAccount(serviceAccount) +``` + +"DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.\nDeprecated: Use serviceAccountName instead." + +### fn spec.deploymentTemplate.spec.template.spec.withServiceAccountName + +```ts +withServiceAccountName(serviceAccountName) +``` + +"ServiceAccountName is the name of the ServiceAccount to use to run this pod.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" + +### fn spec.deploymentTemplate.spec.template.spec.withSetHostnameAsFQDN + +```ts +withSetHostnameAsFQDN(setHostnameAsFQDN) +``` + +"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).\nIn Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).\nIn Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN.\nIf a pod does not have FQDN, this has no effect.\nDefault to false." + +### fn spec.deploymentTemplate.spec.template.spec.withShareProcessNamespace + +```ts +withShareProcessNamespace(shareProcessNamespace) +``` + +"Share a single process namespace between all of the containers in a pod.\nWhen this is set containers will be able to view and signal processes from other containers\nin the same pod, and the first process in each container will not be assigned PID 1.\nHostPID and ShareProcessNamespace cannot both be set.\nOptional: Default to false." + +### fn spec.deploymentTemplate.spec.template.spec.withSubdomain + +```ts +withSubdomain(subdomain) +``` + +"If specified, the fully qualified Pod hostname will be \"...svc.\".\nIf not specified, the pod will not have a domainname at all." + +### fn spec.deploymentTemplate.spec.template.spec.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.\nValue must be non-negative integer. 
The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nIf this value is nil, the default grace period will be used instead.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nDefaults to 30 seconds." + +### fn spec.deploymentTemplate.spec.template.spec.withTolerations + +```ts +withTolerations(tolerations) +``` + +"If specified, the pod's tolerations." + +### fn spec.deploymentTemplate.spec.template.spec.withTolerationsMixin + +```ts +withTolerationsMixin(tolerations) +``` + +"If specified, the pod's tolerations." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withTopologySpreadConstraints + +```ts +withTopologySpreadConstraints(topologySpreadConstraints) +``` + +"TopologySpreadConstraints describes how a group of pods ought to spread across topology\ndomains. Scheduler will schedule pods in a way which abides by the constraints.\nAll topologySpreadConstraints are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.withTopologySpreadConstraintsMixin + +```ts +withTopologySpreadConstraintsMixin(topologySpreadConstraints) +``` + +"TopologySpreadConstraints describes how a group of pods ought to spread across topology\ndomains. Scheduler will schedule pods in a way which abides by the constraints.\nAll topologySpreadConstraints are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.withVolumes + +```ts +withVolumes(volumes) +``` + +"List of volumes that can be mounted by containers belonging to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes" + +### fn spec.deploymentTemplate.spec.template.spec.withVolumesMixin + +```ts +withVolumesMixin(volumes) +``` + +"List of volumes that can be mounted by containers belonging to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity + +"If specified, the pod's scheduling constraints" + +## obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity + +"Describes node affinity scheduling rules for the pod." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.withPreferredDuringSchedulingIgnoredDuringExecution + +```ts +withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred." 
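+
+Assuming the element helpers documented in the following sections compose in the usual way (each contributing one field of a list element), a weighted node-affinity preference might be built like this; `lib` and the label key/values are placeholders:
+
+```jsonnet
+local lib = import 'example/main.libsonnet';  // placeholder import path
+local nodeAffinity = lib.spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity;
+local preferred = nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution;
+
+nodeAffinity.withPreferredDuringSchedulingIgnoredDuringExecution([
+  preferred.withWeight(50)
+  + preferred.preference.withMatchExpressions([
+      preferred.preference.matchExpressions.withKey('topology.kubernetes.io/zone')
+      + preferred.preference.matchExpressions.withOperator('In')
+      + preferred.preference.matchExpressions.withValues(['us-east-1a']),
+    ]),
+])
+```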
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.withPreferredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.withWeight + +```ts +withWeight(weight) +``` + +"Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100." + +## obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference + +"A node selector term, associated with the corresponding weight." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"A list of node selector requirements by node's labels." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"A list of node selector requirements by node's labels." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.withMatchFields + +```ts +withMatchFields(matchFields) +``` + +"A list of node selector requirements by node's fields." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.withMatchFieldsMixin + +```ts +withMatchFieldsMixin(matchFields) +``` + +"A list of node selector requirements by node's fields." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions + +"A list of node selector requirements by node's labels." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.withKey + +```ts +withKey(key) +``` + +"The label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.withValues + +```ts +withValues(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields + +"A list of node selector requirements by node's fields." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.withKey + +```ts +withKey(key) +``` + +"The label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.withOperator + +```ts +withOperator(operator) +``` + +"Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.withValues + +```ts +withValues(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"An array of string values. 
If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + +"If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to an update), the system\nmay or may not try to eventually evict the pod from its node." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNodeSelectorTerms + +```ts +withNodeSelectorTerms(nodeSelectorTerms) +``` + +"Required. A list of node selector terms. The terms are ORed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNodeSelectorTermsMixin + +```ts +withNodeSelectorTermsMixin(nodeSelectorTerms) +``` + +"Required. A list of node selector terms. The terms are ORed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms + +"Required. A list of node selector terms. The terms are ORed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"A list of node selector requirements by node's labels." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"A list of node selector requirements by node's labels." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.withMatchFields + +```ts +withMatchFields(matchFields) +``` + +"A list of node selector requirements by node's fields." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.withMatchFieldsMixin + +```ts +withMatchFieldsMixin(matchFields) +``` + +"A list of node selector requirements by node's fields." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions + +"A list of node selector requirements by node's labels." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.withKey + +```ts +withKey(key) +``` + +"The label key that the selector applies to." 
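+
+Taken together, withNodeSelectorTerms above and the matchExpressions helpers in this section could be composed into a required node-affinity rule, for example (a sketch; `lib` and the key/values are placeholders):
+
+```jsonnet
+local lib = import 'example/main.libsonnet';  // placeholder import path
+local nodeAffinity = lib.spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity;
+local terms = nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms;
+
+nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNodeSelectorTerms([
+  terms.withMatchExpressions([
+    terms.matchExpressions.withKey('kubernetes.io/arch')
+    + terms.matchExpressions.withOperator('In')
+    + terms.matchExpressions.withValues(['amd64', 'arm64']),
+  ]),
+])
+```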
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.withValues + +```ts +withValues(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields + +"A list of node selector requirements by node's fields." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.withKey + +```ts +withKey(key) +``` + +"The label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.withOperator + +```ts +withOperator(operator) +``` + +"Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.withValues + +```ts +withValues(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity + +"Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s))." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.withPreferredDuringSchedulingIgnoredDuringExecution + +```ts +withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.withPreferredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.withRequiredDuringSchedulingIgnoredDuringExecution + +```ts +withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution) +``` + +"If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.withRequiredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution) +``` + +"If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. 
all terms must be satisfied." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.withWeight + +```ts +withWeight(weight) +``` + +"weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm + +"Required. A pod affinity term, associated with the corresponding weight." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMatchLabelKeys + +```ts +withMatchLabelKeys(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMatchLabelKeysMixin + +```ts +withMatchLabelKeysMixin(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMismatchLabelKeys + +```ts +withMismatchLabelKeys(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. 
The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMismatchLabelKeysMixin + +```ts +withMismatchLabelKeysMixin(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withNamespaces + +```ts +withNamespaces(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withNamespacesMixin + +```ts +withNamespacesMixin(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withTopologyKey + +```ts +withTopologyKey(topologyKey) +``` + +"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector + +"A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." 
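+
+The term-level setters above and the labelSelector helpers documented next can be combined into a complete weighted pod-affinity entry; a sketch with placeholder names and labels:
+
+```jsonnet
+local lib = import 'example/main.libsonnet';  // placeholder import path
+local podAffinity = lib.spec.deploymentTemplate.spec.template.spec.affinity.podAffinity;
+local preferred = podAffinity.preferredDuringSchedulingIgnoredDuringExecution;
+
+podAffinity.withPreferredDuringSchedulingIgnoredDuringExecution([
+  preferred.withWeight(50)
+  + preferred.podAffinityTerm.withTopologyKey('topology.kubernetes.io/zone')
+  + preferred.podAffinityTerm.labelSelector.withMatchLabels({ app: 'cache' }),
+])
+```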
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector + +"A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. 
If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution + +"If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMatchLabelKeys + +```ts +withMatchLabelKeys(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMatchLabelKeysMixin + +```ts +withMatchLabelKeysMixin(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMismatchLabelKeys + +```ts +withMismatchLabelKeys(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. 
The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMismatchLabelKeysMixin + +```ts +withMismatchLabelKeysMixin(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNamespaces + +```ts +withNamespaces(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNamespacesMixin + +```ts +withNamespacesMixin(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.withTopologyKey + +```ts +withTopologyKey(topologyKey) +``` + +"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector + +"A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." 
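+
+For the required form, the term fields sit directly on each list element (there is no intermediate podAffinityTerm object), as the function paths above show; combined with the labelSelector helpers below, a sketch might look like this (placeholder names and labels):
+
+```jsonnet
+local lib = import 'example/main.libsonnet';  // placeholder import path
+local podAffinity = lib.spec.deploymentTemplate.spec.template.spec.affinity.podAffinity;
+local required = podAffinity.requiredDuringSchedulingIgnoredDuringExecution;
+
+podAffinity.withRequiredDuringSchedulingIgnoredDuringExecution([
+  required.withTopologyKey('kubernetes.io/hostname')
+  + required.labelSelector.withMatchLabels({ app: 'cache' }),
+])
+```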
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector + +"A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity + +"Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s))." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.withPreferredDuringSchedulingIgnoredDuringExecution + +```ts +withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.withPreferredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution) +``` + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.withRequiredDuringSchedulingIgnoredDuringExecution + +```ts +withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution) +``` + +"If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." 
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.withRequiredDuringSchedulingIgnoredDuringExecutionMixin + +```ts +withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution) +``` + +"If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution + +"The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.withWeight + +```ts +withWeight(weight) +``` + +"weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm + +"Required. A pod affinity term, associated with the corresponding weight." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMatchLabelKeys + +```ts +withMatchLabelKeys(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMatchLabelKeysMixin + +```ts +withMatchLabelKeysMixin(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. 
The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMismatchLabelKeys + +```ts +withMismatchLabelKeys(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withMismatchLabelKeysMixin + +```ts +withMismatchLabelKeysMixin(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withNamespaces + +```ts +withNamespaces(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." 
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withNamespacesMixin + +```ts +withNamespacesMixin(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.withTopologyKey + +```ts +withTopologyKey(topologyKey) +``` + +"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector + +"A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." 
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector + +"A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." 
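+
+Putting the weighted-term helpers together, the hedged sketch below (same placeholder import and merge assumptions as the earlier example) builds a single preferred anti-affinity term that asks the scheduler to spread `app=frontend` pods across nodes.
+
+```jsonnet
+// Sketch only: placeholder import; item-level fragments assumed to merge with '+'.
+local lib = import 'example.libsonnet';
+local preferred = lib.spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity
+                  .preferredDuringSchedulingIgnoredDuringExecution;
+
+// Weight 100: strongly prefer nodes that do not already run an app=frontend pod.
+preferred.withWeight(100)
++ preferred.podAffinityTerm.withTopologyKey('kubernetes.io/hostname')
++ preferred.podAffinityTerm.labelSelector.withMatchLabels({ app: 'frontend' })
+```
+
+The resulting term would then be passed in a list to `withPreferredDuringSchedulingIgnoredDuringExecution`, or appended with the `...Mixin` variant, both documented above.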
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution + +"If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMatchLabelKeys + +```ts +withMatchLabelKeys(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. 
The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMatchLabelKeysMixin + +```ts +withMatchLabelKeysMixin(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both matchLabelKeys and labelSelector.\nAlso, matchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMismatchLabelKeys + +```ts +withMismatchLabelKeys(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withMismatchLabelKeysMixin + +```ts +withMismatchLabelKeysMixin(mismatchLabelKeys) +``` + +"MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both mismatchLabelKeys and labelSelector.\nAlso, mismatchLabelKeys cannot be set when labelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNamespaces + +```ts +withNamespaces(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNamespacesMixin + +```ts +withNamespacesMixin(namespaces) +``` + +"namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.withTopologyKey + +```ts +withTopologyKey(topologyKey) +``` + +"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector + +"A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector + +"A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." 
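+
+The hard variant composes the same way. A hedged sketch (same placeholders and assumptions as above) that forbids two `app=frontend` pods from sharing a node:
+
+```jsonnet
+// Sketch only: placeholder import; item-level fragments assumed to merge with '+'.
+local lib = import 'example.libsonnet';
+local required = lib.spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity
+                 .requiredDuringSchedulingIgnoredDuringExecution;
+
+// Hard rule: never schedule this pod onto a node that already runs app=frontend.
+required.withTopologyKey('kubernetes.io/hostname')
++ required.labelSelector.withMatchExpressions([
+    required.labelSelector.matchExpressions.withKey('app')
+    + required.labelSelector.matchExpressions.withOperator('In')
+    + required.labelSelector.matchExpressions.withValues(['frontend']),
+  ])
+```
+
+The term is then handed to `withRequiredDuringSchedulingIgnoredDuringExecution`, documented earlier in this section.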
+ +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.containers + +"List of containers belonging to the pod.\nContainers cannot currently be added or removed.\nThere must be at least one container in a Pod.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withArgs + +```ts +withArgs(args) +``` + +"Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. 
Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +### fn spec.deploymentTemplate.spec.template.spec.containers.withArgsMixin + +```ts +withArgsMixin(args) +``` + +"Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.withCommand + +```ts +withCommand(command) +``` + +"Entrypoint array. Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +### fn spec.deploymentTemplate.spec.template.spec.containers.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Entrypoint array. Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.withEnv + +```ts +withEnv(env) +``` + +"List of environment variables to set in the container.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withEnvFrom + +```ts +withEnvFrom(envFrom) +``` + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." 
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.withEnvFromMixin + +```ts +withEnvFromMixin(envFrom) +``` + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.withEnvMixin + +```ts +withEnvMixin(env) +``` + +"List of environment variables to set in the container.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.withImage + +```ts +withImage(image) +``` + +"Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withImagePullPolicy + +```ts +withImagePullPolicy(imagePullPolicy) +``` + +"Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + +### fn spec.deploymentTemplate.spec.template.spec.containers.withName + +```ts +withName(name) +``` + +"Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withPorts + +```ts +withPorts(ports) +``` + +"List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withPortsMixin + +```ts +withPortsMixin(ports) +``` + +"List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.withResizePolicy + +```ts +withResizePolicy(resizePolicy) +``` + +"Resources resize policy for the container." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withResizePolicyMixin + +```ts +withResizePolicyMixin(resizePolicy) +``` + +"Resources resize policy for the container." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.withRestartPolicy + +```ts +withRestartPolicy(restartPolicy) +``` + +"RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is \"Always\".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod's restart policy and the container type.\nSetting the RestartPolicy as \"Always\" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy \"Always\"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a \"sidecar\" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withStdin + +```ts +withStdin(stdin) +``` + +"Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withStdinOnce + +```ts +withStdinOnce(stdinOnce) +``` + +"Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false" + +### fn spec.deploymentTemplate.spec.template.spec.containers.withTerminationMessagePath + +```ts +withTerminationMessagePath(terminationMessagePath) +``` + +"Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withTerminationMessagePolicy + +```ts +withTerminationMessagePolicy(terminationMessagePolicy) +``` + +"Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated." 
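+
+The container helpers listed so far are enough to describe a minimal container. A hedged sketch (placeholder import; container helpers assumed to build fragments for a single entry of the `containers` array, and image/argument values chosen for illustration):
+
+```jsonnet
+// Sketch only: placeholder import; image and arguments are illustrative values.
+local lib = import 'example.libsonnet';
+local c = lib.spec.deploymentTemplate.spec.template.spec.containers;
+
+// A minimal main container: name, image, pull policy and an explicit entrypoint.
+c.withName('app')
++ c.withImage('registry.example.com/app:1.2.3')
++ c.withImagePullPolicy('IfNotPresent')
++ c.withCommand(['/bin/app'])
++ c.withArgs(['--listen=:8080'])
+```
+
+The resulting fragment would be placed in the list passed to the pod spec's `withContainers` function.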
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.withTty + +```ts +withTty(tty) +``` + +"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withVolumeDevices + +```ts +withVolumeDevices(volumeDevices) +``` + +"volumeDevices is the list of block devices to be used by the container." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withVolumeDevicesMixin + +```ts +withVolumeDevicesMixin(volumeDevices) +``` + +"volumeDevices is the list of block devices to be used by the container." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.withVolumeMounts + +```ts +withVolumeMounts(volumeMounts) +``` + +"Pod volumes to mount into the container's filesystem.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.withVolumeMountsMixin + +```ts +withVolumeMountsMixin(volumeMounts) +``` + +"Pod volumes to mount into the container's filesystem.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.withWorkingDir + +```ts +withWorkingDir(workingDir) +``` + +"Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated." + +## obj spec.deploymentTemplate.spec.template.spec.containers.env + +"List of environment variables to set in the container.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.withName + +```ts +withName(name) +``` + +"Name of the environment variable. Must be a C_IDENTIFIER." + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.withValue + +```ts +withValue(value) +``` + +"Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." + +## obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom + +"Source for the environment variable's value. Cannot be used if value is not empty." + +## obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.configMapKeyRef + +"Selects a key of a ConfigMap." + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.configMapKeyRef.withKey + +```ts +withKey(key) +``` + +"The key to select." + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.configMapKeyRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.configMapKeyRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the ConfigMap or its key must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.fieldRef + +"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.fieldRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.fieldRef.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"Path of the field to select in the specified API version." + +## obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.resourceFieldRef + +"Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.resourceFieldRef.withContainerName + +```ts +withContainerName(containerName) +``` + +"Container name: required for volumes, optional for env vars" + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.resourceFieldRef.withDivisor + +```ts +withDivisor(divisor) +``` + +"Specifies the output format of the exposed resources, defaults to \"1\"" + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.resourceFieldRef.withResource + +```ts +withResource(resource) +``` + +"Required: resource to select" + +## obj spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.secretKeyRef + +"Selects a key of a secret in the pod's namespace" + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.secretKeyRef.withKey + +```ts +withKey(key) +``` + +"The key of the secret to select from. Must be a valid secret key." + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.secretKeyRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.containers.env.valueFrom.secretKeyRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the Secret or its key must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.containers.envFrom + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.envFrom.withPrefix + +```ts +withPrefix(prefix) +``` + +"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER."
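+
+Environment variables can be set literally with `env.withName`/`env.withValue` or resolved at runtime through the `valueFrom` helpers above. A hedged sketch (same placeholder import and merge assumptions as the container example; the secret name and key are illustrative):
+
+```jsonnet
+// Sketch only: placeholder import; secret name and key chosen for illustration.
+local lib = import 'example.libsonnet';
+local c = lib.spec.deploymentTemplate.spec.template.spec.containers;
+local env = c.env;
+
+c.withEnv([
+  // Literal value.
+  env.withName('LOG_LEVEL') + env.withValue('info'),
+  // Downward-API value: expose the pod's namespace to the process.
+  env.withName('POD_NAMESPACE')
+  + env.valueFrom.fieldRef.withFieldPath('metadata.namespace'),
+  // Secret-backed value.
+  env.withName('DB_PASSWORD')
+  + env.valueFrom.secretKeyRef.withName('db-credentials')
+  + env.valueFrom.secretKeyRef.withKey('password'),
+])
+```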
+ +## obj spec.deploymentTemplate.spec.template.spec.containers.envFrom.configMapRef + +"The ConfigMap to select from" + +### fn spec.deploymentTemplate.spec.template.spec.containers.envFrom.configMapRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.containers.envFrom.configMapRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the ConfigMap must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.containers.envFrom.secretRef + +"The Secret to select from" + +### fn spec.deploymentTemplate.spec.template.spec.containers.envFrom.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.containers.envFrom.secretRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the Secret must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle + +"Actions that the management system should take in response to container lifecycle events.\nCannot be updated." + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart + +"PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." 
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.sleep + +"Sleep represents the duration that the container should sleep before being terminated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.sleep.withSeconds + +```ts +withSeconds(seconds) +``` + +"Seconds is the number of seconds to sleep." + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.tcpSocket + +"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.postStart.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop + +"PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). 
Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." 
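+
+A hedged sketch of a preStop hook built from the `preStop.exec` function above (same assumptions as the earlier sketch):
+
+```jsonnet
+// Sketch only: the import path is an assumption.
+local lifecycle = (import 'crd.libsonnet').spec.deploymentTemplate.spec.template.spec.containers.lifecycle;
+
+// The command is not run in a shell, so call one explicitly for shell syntax.
+lifecycle.preStop.exec.withCommand(['/bin/sh', '-c', 'sleep 5'])
+```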
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.sleep + +"Sleep represents the duration that the container should sleep before being terminated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.sleep.withSeconds + +```ts +withSeconds(seconds) +``` + +"Seconds is the number of seconds to sleep." + +## obj spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.tcpSocket + +"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.containers.lifecycle.preStop.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe + +"Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.withFailureThreshold + +```ts +withFailureThreshold(failureThreshold) +``` + +"Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.withInitialDelaySeconds + +```ts +withInitialDelaySeconds(initialDelaySeconds) +``` + +"Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.withPeriodSeconds + +```ts +withPeriodSeconds(periodSeconds) +``` + +"How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.withSuccessThreshold + +```ts +withSuccessThreshold(successThreshold) +``` + +"Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. 
The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.withTimeoutSeconds + +```ts +withTimeoutSeconds(timeoutSeconds) +``` + +"Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +## obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.grpc + +"GRPC specifies an action involving a GRPC port." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.grpc.withPort + +```ts +withPort(port) +``` + +"Port number of the gRPC service. Number must be in the range 1 to 65535." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.grpc.withService + +```ts +withService(service) +``` + +"Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." + +## obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.tcpSocket + +"TCPSocket specifies an action involving a TCP port." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.containers.livenessProbe.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.containers.ports + +"List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.ports.withContainerPort + +```ts +withContainerPort(containerPort) +``` + +"Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." + +### fn spec.deploymentTemplate.spec.template.spec.containers.ports.withHostIP + +```ts +withHostIP(hostIP) +``` + +"What host IP to bind the external port to." + +### fn spec.deploymentTemplate.spec.template.spec.containers.ports.withHostPort + +```ts +withHostPort(hostPort) +``` + +"Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." + +### fn spec.deploymentTemplate.spec.template.spec.containers.ports.withName + +```ts +withName(name) +``` + +"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." + +### fn spec.deploymentTemplate.spec.template.spec.containers.ports.withProtocol + +```ts +withProtocol(protocol) +``` + +"Protocol for port. 
Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." + +## obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe + +"Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.withFailureThreshold + +```ts +withFailureThreshold(failureThreshold) +``` + +"Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.withInitialDelaySeconds + +```ts +withInitialDelaySeconds(initialDelaySeconds) +``` + +"Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.withPeriodSeconds + +```ts +withPeriodSeconds(periodSeconds) +``` + +"How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.withSuccessThreshold + +```ts +withSuccessThreshold(successThreshold) +``` + +"Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.withTimeoutSeconds + +```ts +withTimeoutSeconds(timeoutSeconds) +``` + +"Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +## obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." 
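+
+A hedged sketch combining the readinessProbe timing setters with an exec action (values are examples; import path assumed as before):
+
+```jsonnet
+// Sketch only: the import path is an assumption.
+local rp = (import 'crd.libsonnet').spec.deploymentTemplate.spec.template.spec.containers.readinessProbe;
+
+// Probe every 10s after an initial 5s delay; three failures mark the container unready.
+rp.exec.withCommand(['/bin/sh', '-c', 'test -f /tmp/ready'])
++ rp.withInitialDelaySeconds(5)
++ rp.withPeriodSeconds(10)
++ rp.withFailureThreshold(3)
+```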
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.grpc + +"GRPC specifies an action involving a GRPC port." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.grpc.withPort + +```ts +withPort(port) +``` + +"Port number of the gRPC service. Number must be in the range 1 to 65535." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.grpc.withService + +```ts +withService(service) +``` + +"Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." + +## obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." 
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.tcpSocket + +"TCPSocket specifies an action involving a TCP port." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.containers.readinessProbe.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.containers.resizePolicy + +"Resources resize policy for the container." + +### fn spec.deploymentTemplate.spec.template.spec.containers.resizePolicy.withResourceName + +```ts +withResourceName(resourceName) +``` + +"Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." + +### fn spec.deploymentTemplate.spec.template.spec.containers.resizePolicy.withRestartPolicy + +```ts +withRestartPolicy(restartPolicy) +``` + +"Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." + +## obj spec.deploymentTemplate.spec.template.spec.containers.resources + +"Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.containers.resources.withClaims + +```ts +withClaims(claims) +``` + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.resources.withClaimsMixin + +```ts +withClaimsMixin(claims) +``` + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.resources.withLimits + +```ts +withLimits(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.containers.resources.withLimitsMixin + +```ts +withLimitsMixin(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.resources.withRequests + +```ts +withRequests(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. 
Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.containers.resources.withRequestsMixin + +```ts +withRequestsMixin(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.containers.resources.claims + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.resources.claims.withName + +```ts +withName(name) +``` + +"Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." + +## obj spec.deploymentTemplate.spec.template.spec.containers.securityContext + +"SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.withAllowPrivilegeEscalation + +```ts +withAllowPrivilegeEscalation(allowPrivilegeEscalation) +``` + +"AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.withPrivileged + +```ts +withPrivileged(privileged) +``` + +"Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.withProcMount + +```ts +withProcMount(procMount) +``` + +"procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.withReadOnlyRootFilesystem + +```ts +withReadOnlyRootFilesystem(readOnlyRootFilesystem) +``` + +"Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." 
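+
+A hedged sketch for the resources setters documented above (resource quantities are arbitrary examples):
+
+```jsonnet
+// Sketch only: the import path is an assumption.
+local res = (import 'crd.libsonnet').spec.deploymentTemplate.spec.template.spec.containers.resources;
+
+// Requests fall back to limits when omitted, so set both explicitly where it matters.
+res.withRequests({ cpu: '100m', memory: '128Mi' })
++ res.withLimits({ memory: '256Mi' })
+```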
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.withRunAsGroup + +```ts +withRunAsGroup(runAsGroup) +``` + +"The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.withRunAsNonRoot + +```ts +withRunAsNonRoot(runAsNonRoot) +``` + +"Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.withRunAsUser + +```ts +withRunAsUser(runAsUser) +``` + +"The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +## obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.appArmorProfile + +"appArmorProfile is the AppArmor options to use by this container. If set, this profile\noverrides the pod's appArmorProfile.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.appArmorProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile loaded on the node that should be used.\nThe profile must be preconfigured on the node to work.\nMust match the loaded name of the profile.\nMust be set if and only if type is \"Localhost\"." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.appArmorProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of AppArmor profile will be applied.\nValid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement." + +## obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.capabilities + +"The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." 
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.capabilities.withAdd + +```ts +withAdd(add) +``` + +"Added capabilities" + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.capabilities.withAddMixin + +```ts +withAddMixin(add) +``` + +"Added capabilities" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.capabilities.withDrop + +```ts +withDrop(drop) +``` + +"Removed capabilities" + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.capabilities.withDropMixin + +```ts +withDropMixin(drop) +``` + +"Removed capabilities" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.seLinuxOptions + +"The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.seLinuxOptions.withLevel + +```ts +withLevel(level) +``` + +"Level is SELinux level label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.seLinuxOptions.withRole + +```ts +withRole(role) +``` + +"Role is a SELinux role label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.seLinuxOptions.withType + +```ts +withType(type) +``` + +"Type is a SELinux type label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.seLinuxOptions.withUser + +```ts +withUser(user) +``` + +"User is a SELinux user label that applies to the container." + +## obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.seccompProfile + +"The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.seccompProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.seccompProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." 
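+
+The securityContext setters above compose into a fairly conventional hardening profile. A hedged sketch:
+
+```jsonnet
+// Sketch only: the import path is an assumption.
+local sc = (import 'crd.libsonnet').spec.deploymentTemplate.spec.template.spec.containers.securityContext;
+
+// Drop all capabilities, forbid privilege escalation, and use the runtime's default seccomp profile.
+sc.withAllowPrivilegeEscalation(false)
++ sc.withReadOnlyRootFilesystem(true)
++ sc.withRunAsNonRoot(true)
++ sc.capabilities.withDrop(['ALL'])
++ sc.seccompProfile.withType('RuntimeDefault')
+```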
+ +## obj spec.deploymentTemplate.spec.template.spec.containers.securityContext.windowsOptions + +"The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.windowsOptions.withGmsaCredentialSpec + +```ts +withGmsaCredentialSpec(gmsaCredentialSpec) +``` + +"GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.windowsOptions.withGmsaCredentialSpecName + +```ts +withGmsaCredentialSpecName(gmsaCredentialSpecName) +``` + +"GMSACredentialSpecName is the name of the GMSA credential spec to use." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.windowsOptions.withHostProcess + +```ts +withHostProcess(hostProcess) +``` + +"HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." + +### fn spec.deploymentTemplate.spec.template.spec.containers.securityContext.windowsOptions.withRunAsUserName + +```ts +withRunAsUserName(runAsUserName) +``` + +"The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +## obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe + +"StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.withFailureThreshold + +```ts +withFailureThreshold(failureThreshold) +``` + +"Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.withInitialDelaySeconds + +```ts +withInitialDelaySeconds(initialDelaySeconds) +``` + +"Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.withPeriodSeconds + +```ts +withPeriodSeconds(periodSeconds) +``` + +"How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." 
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.withSuccessThreshold + +```ts +withSuccessThreshold(successThreshold) +``` + +"Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.withTimeoutSeconds + +```ts +withTimeoutSeconds(timeoutSeconds) +``` + +"Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +## obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.grpc + +"GRPC specifies an action involving a GRPC port." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.grpc.withPort + +```ts +withPort(port) +``` + +"Port number of the gRPC service. Number must be in the range 1 to 65535." 
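+
+A hedged sketch of a startup probe that tolerates a slow boot by raising the failure threshold (port and thresholds are example values):
+
+```jsonnet
+// Sketch only: the import path is an assumption.
+local sp = (import 'crd.libsonnet').spec.deploymentTemplate.spec.template.spec.containers.startupProbe;
+
+// Allow up to 30 attempts, 10 seconds apart, before the container is considered failed.
+sp.grpc.withPort(9000)
++ sp.withFailureThreshold(30)
++ sp.withPeriodSeconds(10)
+```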
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.grpc.withService + +```ts +withService(service) +``` + +"Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." + +## obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.containers.startupProbe.tcpSocket + +"TCPSocket specifies an action involving a TCP port." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.containers.startupProbe.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.containers.volumeDevices + +"volumeDevices is the list of block devices to be used by the container." + +### fn spec.deploymentTemplate.spec.template.spec.containers.volumeDevices.withDevicePath + +```ts +withDevicePath(devicePath) +``` + +"devicePath is the path inside of the container that the device will be mapped to." 
+ +### fn spec.deploymentTemplate.spec.template.spec.containers.volumeDevices.withName + +```ts +withName(name) +``` + +"name must match the name of a persistentVolumeClaim in the pod" + +## obj spec.deploymentTemplate.spec.template.spec.containers.volumeMounts + +"Pod volumes to mount into the container's filesystem.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.containers.volumeMounts.withMountPath + +```ts +withMountPath(mountPath) +``` + +"Path within the container at which the volume should be mounted. Must\nnot contain ':'." + +### fn spec.deploymentTemplate.spec.template.spec.containers.volumeMounts.withMountPropagation + +```ts +withMountPropagation(mountPropagation) +``` + +"mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.\nWhen RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified\n(which defaults to None)." + +### fn spec.deploymentTemplate.spec.template.spec.containers.volumeMounts.withName + +```ts +withName(name) +``` + +"This must match the Name of a Volume." + +### fn spec.deploymentTemplate.spec.template.spec.containers.volumeMounts.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." + +### fn spec.deploymentTemplate.spec.template.spec.containers.volumeMounts.withRecursiveReadOnly + +```ts +withRecursiveReadOnly(recursiveReadOnly) +``` + +"RecursiveReadOnly specifies whether read-only mounts should be handled\nrecursively.\n\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made\nrecursively read-only. If this field is set to IfPossible, the mount is made\nrecursively read-only, if it is supported by the container runtime. If this\nfield is set to Enabled, the mount is made recursively read-only if it is\nsupported by the container runtime, otherwise the pod will not be started and\nan error will be generated to indicate the reason.\n\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to\nNone (or be unspecified, which defaults to None).\n\n\nIf this field is not specified, it is treated as an equivalent of Disabled." + +### fn spec.deploymentTemplate.spec.template.spec.containers.volumeMounts.withSubPath + +```ts +withSubPath(subPath) +``` + +"Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." + +### fn spec.deploymentTemplate.spec.template.spec.containers.volumeMounts.withSubPathExpr + +```ts +withSubPathExpr(subPathExpr) +``` + +"Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." + +## obj spec.deploymentTemplate.spec.template.spec.dnsConfig + +"Specifies the DNS parameters of a pod.\nParameters specified here will be merged to the generated DNS\nconfiguration based on DNSPolicy." 
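+
+A hedged sketch for the volumeMounts setters documented above (names and path are examples; the name must match a volume declared on the pod):
+
+```jsonnet
+// Sketch only: the import path is an assumption.
+local vm = (import 'crd.libsonnet').spec.deploymentTemplate.spec.template.spec.containers.volumeMounts;
+
+// One read-only mount for configuration files.
+vm.withName('config')
++ vm.withMountPath('/etc/app')
++ vm.withReadOnly(true)
+```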
+ +### fn spec.deploymentTemplate.spec.template.spec.dnsConfig.withNameservers + +```ts +withNameservers(nameservers) +``` + +"A list of DNS name server IP addresses.\nThis will be appended to the base nameservers generated from DNSPolicy.\nDuplicated nameservers will be removed." + +### fn spec.deploymentTemplate.spec.template.spec.dnsConfig.withNameserversMixin + +```ts +withNameserversMixin(nameservers) +``` + +"A list of DNS name server IP addresses.\nThis will be appended to the base nameservers generated from DNSPolicy.\nDuplicated nameservers will be removed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.dnsConfig.withOptions + +```ts +withOptions(options) +``` + +"A list of DNS resolver options.\nThis will be merged with the base options generated from DNSPolicy.\nDuplicated entries will be removed. Resolution options given in Options\nwill override those that appear in the base DNSPolicy." + +### fn spec.deploymentTemplate.spec.template.spec.dnsConfig.withOptionsMixin + +```ts +withOptionsMixin(options) +``` + +"A list of DNS resolver options.\nThis will be merged with the base options generated from DNSPolicy.\nDuplicated entries will be removed. Resolution options given in Options\nwill override those that appear in the base DNSPolicy." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.dnsConfig.withSearches + +```ts +withSearches(searches) +``` + +"A list of DNS search domains for host-name lookup.\nThis will be appended to the base search paths generated from DNSPolicy.\nDuplicated search paths will be removed." + +### fn spec.deploymentTemplate.spec.template.spec.dnsConfig.withSearchesMixin + +```ts +withSearchesMixin(searches) +``` + +"A list of DNS search domains for host-name lookup.\nThis will be appended to the base search paths generated from DNSPolicy.\nDuplicated search paths will be removed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.dnsConfig.options + +"A list of DNS resolver options.\nThis will be merged with the base options generated from DNSPolicy.\nDuplicated entries will be removed. Resolution options given in Options\nwill override those that appear in the base DNSPolicy." + +### fn spec.deploymentTemplate.spec.template.spec.dnsConfig.options.withName + +```ts +withName(name) +``` + +"Required." + +### fn spec.deploymentTemplate.spec.template.spec.dnsConfig.options.withValue + +```ts +withValue(value) +``` + + + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers + +"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\npod to perform user-initiated actions such as debugging. This list cannot be specified when\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withArgs + +```ts +withArgs(args) +``` + +"Arguments to the entrypoint.\nThe image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
\"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withArgsMixin + +```ts +withArgsMixin(args) +``` + +"Arguments to the entrypoint.\nThe image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withCommand + +```ts +withCommand(command) +``` + +"Entrypoint array. Not executed within a shell.\nThe image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Entrypoint array. Not executed within a shell.\nThe image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withEnv + +```ts +withEnv(env) +``` + +"List of environment variables to set in the container.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withEnvFrom + +```ts +withEnvFrom(envFrom) +``` + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. 
When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withEnvFromMixin + +```ts +withEnvFromMixin(envFrom) +``` + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withEnvMixin + +```ts +withEnvMixin(env) +``` + +"List of environment variables to set in the container.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withImage + +```ts +withImage(image) +``` + +"Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withImagePullPolicy + +```ts +withImagePullPolicy(imagePullPolicy) +``` + +"Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withName + +```ts +withName(name) +``` + +"Name of the ephemeral container specified as a DNS_LABEL.\nThis name must be unique among all containers, init containers and ephemeral containers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withPorts + +```ts +withPorts(ports) +``` + +"Ports are not allowed for ephemeral containers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withPortsMixin + +```ts +withPortsMixin(ports) +``` + +"Ports are not allowed for ephemeral containers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withResizePolicy + +```ts +withResizePolicy(resizePolicy) +``` + +"Resources resize policy for the container." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withResizePolicyMixin + +```ts +withResizePolicyMixin(resizePolicy) +``` + +"Resources resize policy for the container." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withRestartPolicy + +```ts +withRestartPolicy(restartPolicy) +``` + +"Restart policy for the container to manage the restart behavior of each\ncontainer within a pod.\nThis may only be set for init containers. You cannot set this field on\nephemeral containers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withStdin + +```ts +withStdin(stdin) +``` + +"Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false." 
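+
+A hedged sketch of an ephemeral debug container built from the setters above (image and name are examples; as noted, such containers are added through the pod's ephemeralcontainers subresource):
+
+```jsonnet
+// Sketch only: the import path is an assumption.
+local ec = (import 'crd.libsonnet').spec.deploymentTemplate.spec.template.spec.ephemeralContainers;
+
+// An interactive shell container for debugging.
+ec.withName('debugger')
++ ec.withImage('busybox:1.36')
++ ec.withCommand(['sh'])
++ ec.withStdin(true)
+```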
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withStdinOnce + +```ts +withStdinOnce(stdinOnce) +``` + +"Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withTargetContainerName + +```ts +withTargetContainerName(targetContainerName) +``` + +"If set, the name of the container from PodSpec that this ephemeral container targets.\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\nIf not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\n\nThe container runtime must implement support for this feature. If the runtime does not\nsupport namespace targeting then the result of setting this field is undefined." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withTerminationMessagePath + +```ts +withTerminationMessagePath(terminationMessagePath) +``` + +"Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withTerminationMessagePolicy + +```ts +withTerminationMessagePolicy(terminationMessagePolicy) +``` + +"Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withTty + +```ts +withTty(tty) +``` + +"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withVolumeDevices + +```ts +withVolumeDevices(volumeDevices) +``` + +"volumeDevices is the list of block devices to be used by the container." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withVolumeDevicesMixin + +```ts +withVolumeDevicesMixin(volumeDevices) +``` + +"volumeDevices is the list of block devices to be used by the container." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withVolumeMounts + +```ts +withVolumeMounts(volumeMounts) +``` + +"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.\nCannot be updated." 
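+
+**Example (illustrative, not part of the generated reference):** a sketch of assembling an ephemeral debug container with the functions above; the `raw` alias and import path are assumptions.
+
+```jsonnet
+local raw = import 'main.libsonnet';  // hypothetical import path
+local ec = raw.spec.deploymentTemplate.spec.template.spec.ephemeralContainers;
+
+// Interactive debug container attached to the namespaces of the 'app' container.
+ec.withName('debug')
++ ec.withImage('busybox:1.36')
++ ec.withCommand(['sh'])
++ ec.withStdin(true)
++ ec.withTty(true)
++ ec.withTargetContainerName('app')
+// The resulting object would normally be passed to the enclosing pod spec's
+// ephemeral-containers list setter (not shown in this section).
+```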
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withVolumeMountsMixin + +```ts +withVolumeMountsMixin(volumeMounts) +``` + +"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.withWorkingDir + +```ts +withWorkingDir(workingDir) +``` + +"Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env + +"List of environment variables to set in the container.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.withName + +```ts +withName(name) +``` + +"Name of the environment variable. Must be a C_IDENTIFIER." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.withValue + +```ts +withValue(value) +``` + +"Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom + +"Source for the environment variable's value. Cannot be used if value is not empty." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.configMapKeyRef + +"Selects a key of a ConfigMap." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.configMapKeyRef.withKey + +```ts +withKey(key) +``` + +"The key to select." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.configMapKeyRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.configMapKeyRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the ConfigMap or its key must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.fieldRef + +"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.fieldRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.fieldRef.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"Path of the field to select in the specified API version." 
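+
+**Example (illustrative, not part of the generated reference):** a sketch of the `env` element builders, combining a literal value with a downward-API `fieldRef`; the `raw` alias and import path are assumptions.
+
+```jsonnet
+local raw = import 'main.libsonnet';  // hypothetical import path
+local ec = raw.spec.deploymentTemplate.spec.template.spec.ephemeralContainers;
+local env = ec.env;
+
+ec.withEnv([
+  // literal value
+  env.withName('LOG_LEVEL') + env.withValue('debug'),
+  // value resolved from the pod's own metadata at runtime
+  env.withName('POD_NAMESPACE')
+  + env.valueFrom.fieldRef.withFieldPath('metadata.namespace'),
+])
+```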
+ +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.resourceFieldRef + +"Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.resourceFieldRef.withContainerName + +```ts +withContainerName(containerName) +``` + +"Container name: required for volumes, optional for env vars" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.resourceFieldRef.withDivisor + +```ts +withDivisor(divisor) +``` + +"Specifies the output format of the exposed resources, defaults to \"1\"" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.resourceFieldRef.withResource + +```ts +withResource(resource) +``` + +"Required: resource to select" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.secretKeyRef + +"Selects a key of a secret in the pod's namespace" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.secretKeyRef.withKey + +```ts +withKey(key) +``` + +"The key of the secret to select from. Must be a valid secret key." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.secretKeyRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.env.valueFrom.secretKeyRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the Secret or its key must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom.withPrefix + +```ts +withPrefix(prefix) +``` + +"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom.configMapRef + +"The ConfigMap to select from" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom.configMapRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?"
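+
+**Example (illustrative, not part of the generated reference):** a sketch combining a single variable drawn from a Secret key with a ConfigMap imported wholesale via `envFrom`; the `raw` alias and import path are assumptions.
+
+```jsonnet
+local raw = import 'main.libsonnet';  // hypothetical import path
+local ec = raw.spec.deploymentTemplate.spec.template.spec.ephemeralContainers;
+
+ec.withEnv([
+  // one variable taken from a Secret key
+  ec.env.withName('API_TOKEN')
+  + ec.env.valueFrom.secretKeyRef.withName('api-credentials')
+  + ec.env.valueFrom.secretKeyRef.withKey('token'),
+])
++ ec.withEnvFrom([
+  // every key of the ConfigMap, prefixed with APP_
+  ec.envFrom.withPrefix('APP_')
+  + ec.envFrom.configMapRef.withName('app-config'),
+])
+```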
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom.configMapRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the ConfigMap must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom.secretRef + +"The Secret to select from" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.envFrom.secretRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the Secret must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle + +"Lifecycle is not allowed for ephemeral containers." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart + +"PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. 
HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.sleep + +"Sleep represents the duration that the container should sleep before being terminated." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.sleep.withSeconds + +```ts +withSeconds(seconds) +``` + +"Seconds is the number of seconds to sleep." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.tcpSocket + +"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.postStart.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop + +"PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.exec + +"Exec specifies the action to take." 
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." 
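+
+**Example (illustrative, not part of the generated reference):** per the docs above, lifecycle hooks are rejected for ephemeral containers, so this sketch only shows how the `httpGet` handler builders compose; the `raw` alias and import path are assumptions.
+
+```jsonnet
+local raw = import 'main.libsonnet';  // hypothetical import path
+local ec = raw.spec.deploymentTemplate.spec.template.spec.ephemeralContainers;
+local hook = ec.lifecycle.preStop.httpGet;
+
+// Shape illustration only: lifecycle is not allowed for ephemeral containers.
+hook.withPath('/quitquitquit')
++ hook.withPort(8080)
++ hook.withScheme('HTTP')
++ hook.withHttpHeaders([
+    hook.httpHeaders.withName('X-Drain') + hook.httpHeaders.withValue('true'),
+  ])
+```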
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.sleep + +"Sleep represents the duration that the container should sleep before being terminated." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.sleep.withSeconds + +```ts +withSeconds(seconds) +``` + +"Seconds is the number of seconds to sleep." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.tcpSocket + +"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.lifecycle.preStop.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe + +"Probes are not allowed for ephemeral containers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.withFailureThreshold + +```ts +withFailureThreshold(failureThreshold) +``` + +"Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.withInitialDelaySeconds + +```ts +withInitialDelaySeconds(initialDelaySeconds) +``` + +"Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.withPeriodSeconds + +```ts +withPeriodSeconds(periodSeconds) +``` + +"How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.withSuccessThreshold + +```ts +withSuccessThreshold(successThreshold) +``` + +"Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. 
The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.withTimeoutSeconds + +```ts +withTimeoutSeconds(timeoutSeconds) +``` + +"Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.grpc + +"GRPC specifies an action involving a GRPC port." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.grpc.withPort + +```ts +withPort(port) +``` + +"Port number of the gRPC service. Number must be in the range 1 to 65535." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.grpc.withService + +```ts +withService(service) +``` + +"Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.tcpSocket + +"TCPSocket specifies an action involving a TCP port." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.livenessProbe.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.ports + +"Ports are not allowed for ephemeral containers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.ports.withContainerPort + +```ts +withContainerPort(containerPort) +``` + +"Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.ports.withHostIP + +```ts +withHostIP(hostIP) +``` + +"What host IP to bind the external port to." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.ports.withHostPort + +```ts +withHostPort(hostPort) +``` + +"Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.ports.withName + +```ts +withName(name) +``` + +"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.ports.withProtocol + +```ts +withProtocol(protocol) +``` + +"Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe + +"Probes are not allowed for ephemeral containers." 
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.withFailureThreshold + +```ts +withFailureThreshold(failureThreshold) +``` + +"Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.withInitialDelaySeconds + +```ts +withInitialDelaySeconds(initialDelaySeconds) +``` + +"Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.withPeriodSeconds + +```ts +withPeriodSeconds(periodSeconds) +``` + +"How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.withSuccessThreshold + +```ts +withSuccessThreshold(successThreshold) +``` + +"Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.withTimeoutSeconds + +```ts +withTimeoutSeconds(timeoutSeconds) +``` + +"Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. 
The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.grpc + +"GRPC specifies an action involving a GRPC port." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.grpc.withPort + +```ts +withPort(port) +``` + +"Port number of the gRPC service. Number must be in the range 1 to 65535." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.grpc.withService + +```ts +withService(service) +``` + +"Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.tcpSocket + +"TCPSocket specifies an action involving a TCP port." 
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.readinessProbe.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resizePolicy + +"Resources resize policy for the container." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resizePolicy.withResourceName + +```ts +withResourceName(resourceName) +``` + +"Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resizePolicy.withRestartPolicy + +```ts +withRestartPolicy(restartPolicy) +``` + +"Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources + +"Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources\nalready allocated to the pod." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources.withClaims + +```ts +withClaims(claims) +``` + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources.withClaimsMixin + +```ts +withClaimsMixin(claims) +``` + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources.withLimits + +```ts +withLimits(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources.withLimitsMixin + +```ts +withLimitsMixin(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources.withRequests + +```ts +withRequests(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. 
Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources.withRequestsMixin + +```ts +withRequestsMixin(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources.claims + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.resources.claims.withName + +```ts +withName(name) +``` + +"Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext + +"Optional: SecurityContext defines the security options the ephemeral container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.withAllowPrivilegeEscalation + +```ts +withAllowPrivilegeEscalation(allowPrivilegeEscalation) +``` + +"AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.withPrivileged + +```ts +withPrivileged(privileged) +``` + +"Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.withProcMount + +```ts +withProcMount(procMount) +``` + +"procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.withReadOnlyRootFilesystem + +```ts +withReadOnlyRootFilesystem(readOnlyRootFilesystem) +``` + +"Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." 
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.withRunAsGroup + +```ts +withRunAsGroup(runAsGroup) +``` + +"The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.withRunAsNonRoot + +```ts +withRunAsNonRoot(runAsNonRoot) +``` + +"Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.withRunAsUser + +```ts +withRunAsUser(runAsUser) +``` + +"The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.appArmorProfile + +"appArmorProfile is the AppArmor options to use by this container. If set, this profile\noverrides the pod's appArmorProfile.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.appArmorProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile loaded on the node that should be used.\nThe profile must be preconfigured on the node to work.\nMust match the loaded name of the profile.\nMust be set if and only if type is \"Localhost\"." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.appArmorProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of AppArmor profile will be applied.\nValid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.capabilities + +"The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." 
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.capabilities.withAdd + +```ts +withAdd(add) +``` + +"Added capabilities" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.capabilities.withAddMixin + +```ts +withAddMixin(add) +``` + +"Added capabilities" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.capabilities.withDrop + +```ts +withDrop(drop) +``` + +"Removed capabilities" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.capabilities.withDropMixin + +```ts +withDropMixin(drop) +``` + +"Removed capabilities" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seLinuxOptions + +"The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seLinuxOptions.withLevel + +```ts +withLevel(level) +``` + +"Level is SELinux level label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seLinuxOptions.withRole + +```ts +withRole(role) +``` + +"Role is a SELinux role label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seLinuxOptions.withType + +```ts +withType(type) +``` + +"Type is a SELinux type label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seLinuxOptions.withUser + +```ts +withUser(user) +``` + +"User is a SELinux user label that applies to the container." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seccompProfile + +"The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seccompProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.seccompProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." 
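+
+**Example (illustrative, not part of the generated reference):** a sketch of a locked-down container security context built from the functions above; the `raw` alias and import path are assumptions.
+
+```jsonnet
+local raw = import 'main.libsonnet';  // hypothetical import path
+local ec = raw.spec.deploymentTemplate.spec.template.spec.ephemeralContainers;
+local sc = ec.securityContext;
+
+ec.withName('debug')
++ ec.withImage('busybox:1.36')
++ sc.withRunAsNonRoot(true)
++ sc.withRunAsUser(65534)
++ sc.withAllowPrivilegeEscalation(false)
++ sc.withReadOnlyRootFilesystem(true)
++ sc.capabilities.withDrop(['ALL'])
++ sc.seccompProfile.withType('RuntimeDefault')
+```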
+ +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.windowsOptions + +"The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.windowsOptions.withGmsaCredentialSpec + +```ts +withGmsaCredentialSpec(gmsaCredentialSpec) +``` + +"GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.windowsOptions.withGmsaCredentialSpecName + +```ts +withGmsaCredentialSpecName(gmsaCredentialSpecName) +``` + +"GMSACredentialSpecName is the name of the GMSA credential spec to use." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.windowsOptions.withHostProcess + +```ts +withHostProcess(hostProcess) +``` + +"HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.securityContext.windowsOptions.withRunAsUserName + +```ts +withRunAsUserName(runAsUserName) +``` + +"The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe + +"Probes are not allowed for ephemeral containers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.withFailureThreshold + +```ts +withFailureThreshold(failureThreshold) +``` + +"Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.withInitialDelaySeconds + +```ts +withInitialDelaySeconds(initialDelaySeconds) +``` + +"Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.withPeriodSeconds + +```ts +withPeriodSeconds(periodSeconds) +``` + +"How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.withSuccessThreshold + +```ts +withSuccessThreshold(successThreshold) +``` + +"Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." 
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.withTimeoutSeconds + +```ts +withTimeoutSeconds(timeoutSeconds) +``` + +"Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.grpc + +"GRPC specifies an action involving a GRPC port." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.grpc.withPort + +```ts +withPort(port) +``` + +"Port number of the gRPC service. Number must be in the range 1 to 65535." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.grpc.withService + +```ts +withService(service) +``` + +"Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet + +"HTTPGet specifies the http request to perform." 
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.tcpSocket + +"TCPSocket specifies an action involving a TCP port." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.startupProbe.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeDevices + +"volumeDevices is the list of block devices to be used by the container." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeDevices.withDevicePath + +```ts +withDevicePath(devicePath) +``` + +"devicePath is the path inside of the container that the device will be mapped to." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeDevices.withName + +```ts +withName(name) +``` + +"name must match the name of a persistentVolumeClaim in the pod" + +## obj spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeMounts + +"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.\nCannot be updated." 
+ +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeMounts.withMountPath + +```ts +withMountPath(mountPath) +``` + +"Path within the container at which the volume should be mounted. Must\nnot contain ':'." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeMounts.withMountPropagation + +```ts +withMountPropagation(mountPropagation) +``` + +"mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.\nWhen RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified\n(which defaults to None)." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeMounts.withName + +```ts +withName(name) +``` + +"This must match the Name of a Volume." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeMounts.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeMounts.withRecursiveReadOnly + +```ts +withRecursiveReadOnly(recursiveReadOnly) +``` + +"RecursiveReadOnly specifies whether read-only mounts should be handled\nrecursively.\n\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made\nrecursively read-only. If this field is set to IfPossible, the mount is made\nrecursively read-only, if it is supported by the container runtime. If this\nfield is set to Enabled, the mount is made recursively read-only if it is\nsupported by the container runtime, otherwise the pod will not be started and\nan error will be generated to indicate the reason.\n\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to\nNone (or be unspecified, which defaults to None).\n\n\nIf this field is not specified, it is treated as an equivalent of Disabled." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeMounts.withSubPath + +```ts +withSubPath(subPath) +``` + +"Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." + +### fn spec.deploymentTemplate.spec.template.spec.ephemeralContainers.volumeMounts.withSubPathExpr + +```ts +withSubPathExpr(subPathExpr) +``` + +"Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." + +## obj spec.deploymentTemplate.spec.template.spec.hostAliases + +"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts\nfile if specified." + +### fn spec.deploymentTemplate.spec.template.spec.hostAliases.withHostnames + +```ts +withHostnames(hostnames) +``` + +"Hostnames for the above IP address." + +### fn spec.deploymentTemplate.spec.template.spec.hostAliases.withHostnamesMixin + +```ts +withHostnamesMixin(hostnames) +``` + +"Hostnames for the above IP address." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.hostAliases.withIp + +```ts +withIp(ip) +``` + +"IP address of the host file entry." 
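+ +**Example (illustrative, not generated):** building a single hosts-file entry with the `hostAliases` functions above. The import/root aliases and the pod-level `withHostAliases([...])` setter are assumptions following the library's `with<Field>` naming pattern, and functions under a list path are assumed to build one element of that list, as in other generated CRD libraries. + +```jsonnet +local podSpec = (import 'generated-lib/main.libsonnet').example.spec.deploymentTemplate.spec.template.spec;  // placeholder import/root + +// One /etc/hosts entry injected into every pod created from the template. +local cacheAlias = +  podSpec.hostAliases.withIp('10.0.0.10') + +  podSpec.hostAliases.withHostnames(['cache.internal']); + +podSpec.withHostAliases([cacheAlias])  // pod-level setter assumed from the naming pattern +```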
+ +## obj spec.deploymentTemplate.spec.template.spec.imagePullSecrets + +"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod" + +### fn spec.deploymentTemplate.spec.template.spec.imagePullSecrets.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers + +"List of initialization containers belonging to the pod.\nInit containers are executed in order prior to containers being started. If any\ninit container fails, the pod is considered to have failed and is handled according\nto its restartPolicy. The name for an init container or normal container must be\nunique among all containers.\nInit containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.\nThe resourceRequirements of an init container are taken into account during scheduling\nby finding the highest request/limit for each resource type, and then using the max of\nof that value or the sum of the normal containers. Limits are applied to init containers\nin a similar fashion.\nInit containers cannot currently be added or removed.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withArgs + +```ts +withArgs(args) +``` + +"Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withArgsMixin + +```ts +withArgsMixin(args) +``` + +"Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withCommand + +```ts +withCommand(command) +``` + +"Entrypoint array. 
Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Entrypoint array. Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withEnv + +```ts +withEnv(env) +``` + +"List of environment variables to set in the container.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withEnvFrom + +```ts +withEnvFrom(envFrom) +``` + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withEnvFromMixin + +```ts +withEnvFromMixin(envFrom) +``` + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withEnvMixin + +```ts +withEnvMixin(env) +``` + +"List of environment variables to set in the container.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withImage + +```ts +withImage(image) +``` + +"Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets." 
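+ +**Example (illustrative, not generated):** one init container assembled from the `initContainers` setters above. The import/root aliases are placeholders as in the earlier examples, and attaching the list via a pod-level `withInitContainers([...])` setter is assumed from the library's naming pattern. + +```jsonnet +local podSpec = (import 'generated-lib/main.libsonnet').example.spec.deploymentTemplate.spec.template.spec;  // placeholder import/root + +// One-shot init container that blocks until a backing service answers. +local waitForDb = +  podSpec.initContainers.withName('wait-for-db') + +  podSpec.initContainers.withImage('busybox:1.36') + +  podSpec.initContainers.withCommand(['sh', '-c', 'until nc -z db 5432; do sleep 2; done']); + +podSpec.withInitContainers([waitForDb])  // pod-level setter assumed +```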
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withImagePullPolicy + +```ts +withImagePullPolicy(imagePullPolicy) +``` + +"Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withName + +```ts +withName(name) +``` + +"Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withPorts + +```ts +withPorts(ports) +``` + +"List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withPortsMixin + +```ts +withPortsMixin(ports) +``` + +"List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withResizePolicy + +```ts +withResizePolicy(resizePolicy) +``` + +"Resources resize policy for the container." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withResizePolicyMixin + +```ts +withResizePolicyMixin(resizePolicy) +``` + +"Resources resize policy for the container." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withRestartPolicy + +```ts +withRestartPolicy(restartPolicy) +``` + +"RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is \"Always\".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod's restart policy and the container type.\nSetting the RestartPolicy as \"Always\" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy \"Always\"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a \"sidecar\" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted." 
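+ +**Example (illustrative, not generated):** a sidecar-style init container using `withRestartPolicy` as described above. The image, names and the pod-level `withInitContainersMixin([...])` appender are placeholders/assumptions. + +```jsonnet +local podSpec = (import 'generated-lib/main.libsonnet').example.spec.deploymentTemplate.spec.template.spec;  // placeholder import/root + +// Restartable ("sidecar") init container: keeps running alongside the regular containers. +local logShipper = +  podSpec.initContainers.withName('log-shipper') + +  podSpec.initContainers.withImage('fluent/fluent-bit:3.0') +  // example image +  podSpec.initContainers.withRestartPolicy('Always'); + +podSpec.withInitContainersMixin([logShipper])  // appender assumed from the naming pattern +```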
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withStdin + +```ts +withStdin(stdin) +``` + +"Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withStdinOnce + +```ts +withStdinOnce(stdinOnce) +``` + +"Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withTerminationMessagePath + +```ts +withTerminationMessagePath(terminationMessagePath) +``` + +"Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withTerminationMessagePolicy + +```ts +withTerminationMessagePolicy(terminationMessagePolicy) +``` + +"Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withTty + +```ts +withTty(tty) +``` + +"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withVolumeDevices + +```ts +withVolumeDevices(volumeDevices) +``` + +"volumeDevices is the list of block devices to be used by the container." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withVolumeDevicesMixin + +```ts +withVolumeDevicesMixin(volumeDevices) +``` + +"volumeDevices is the list of block devices to be used by the container." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withVolumeMounts + +```ts +withVolumeMounts(volumeMounts) +``` + +"Pod volumes to mount into the container's filesystem.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withVolumeMountsMixin + +```ts +withVolumeMountsMixin(volumeMounts) +``` + +"Pod volumes to mount into the container's filesystem.\nCannot be updated." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.withWorkingDir + +```ts +withWorkingDir(workingDir) +``` + +"Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.env + +"List of environment variables to set in the container.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.withName + +```ts +withName(name) +``` + +"Name of the environment variable. Must be a C_IDENTIFIER." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.withValue + +```ts +withValue(value) +``` + +"Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom + +"Source for the environment variable's value. Cannot be used if value is not empty." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.configMapKeyRef + +"Selects a key of a ConfigMap." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.configMapKeyRef.withKey + +```ts +withKey(key) +``` + +"The key to select." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.configMapKeyRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.configMapKeyRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the ConfigMap or its key must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.fieldRef + +"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.fieldRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.fieldRef.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"Path of the field to select in the specified API version." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.resourceFieldRef + +"Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." 
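+ +**Example (illustrative, not generated):** two environment variables for an init container, one from the downward API and one from a ConfigMap key, built with the `env` and `env.valueFrom` functions above. The import/root aliases and the ConfigMap name are placeholders; nested setters are assumed to produce fragments of a single list element. + +```jsonnet +local podSpec = (import 'generated-lib/main.libsonnet').example.spec.deploymentTemplate.spec.template.spec;  // placeholder import/root +local env = podSpec.initContainers.env; + +local podName = +  env.withName('POD_NAME') + +  env.valueFrom.fieldRef.withFieldPath('metadata.name'); + +local logLevel = +  env.withName('LOG_LEVEL') + +  env.valueFrom.configMapKeyRef.withName('app-config') +  // placeholder ConfigMap +  env.valueFrom.configMapKeyRef.withKey('logLevel'); + +podSpec.initContainers.withEnv([podName, logLevel]) +```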
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.resourceFieldRef.withContainerName + +```ts +withContainerName(containerName) +``` + +"Container name: required for volumes, optional for env vars" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.resourceFieldRef.withDivisor + +```ts +withDivisor(divisor) +``` + +"Specifies the output format of the exposed resources, defaults to \"1\"" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.resourceFieldRef.withResource + +```ts +withResource(resource) +``` + +"Required: resource to select" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.secretKeyRef + +"Selects a key of a secret in the pod's namespace" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.secretKeyRef.withKey + +```ts +withKey(key) +``` + +"The key of the secret to select from. Must be a valid secret key." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.secretKeyRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.env.valueFrom.secretKeyRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the Secret or its key must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.envFrom + +"List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.envFrom.withPrefix + +```ts +withPrefix(prefix) +``` + +"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.envFrom.configMapRef + +"The ConfigMap to select from" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.envFrom.configMapRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.envFrom.configMapRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the ConfigMap must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.envFrom.secretRef + +"The Secret to select from" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.envFrom.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?"
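+ +**Example (illustrative, not generated):** importing a whole ConfigMap into an init container's environment with the `envFrom` functions above; the names and the import/root aliases are placeholders. + +```jsonnet +local podSpec = (import 'generated-lib/main.libsonnet').example.spec.deploymentTemplate.spec.template.spec;  // placeholder import/root + +// Every key in the ConfigMap becomes an APP_-prefixed environment variable. +local fromConfig = +  podSpec.initContainers.envFrom.withPrefix('APP_') + +  podSpec.initContainers.envFrom.configMapRef.withName('app-config');  // placeholder ConfigMap + +podSpec.initContainers.withEnvFrom([fromConfig]) +```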
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.envFrom.secretRef.withOptional + +```ts +withOptional(optional) +``` + +"Specify whether the Secret must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle + +"Actions that the management system should take in response to container lifecycle events.\nCannot be updated." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart + +"PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." 
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.sleep + +"Sleep represents the duration that the container should sleep before being terminated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.sleep.withSeconds + +```ts +withSeconds(seconds) +``` + +"Seconds is the number of seconds to sleep." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.tcpSocket + +"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.postStart.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop + +"PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." 
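+ +**Example (illustrative, not generated):** lifecycle hooks for a restartable init container, combining the `lifecycle.postStart` and `lifecycle.preStop` functions above. Paths, ports and the image are made up, the import/root aliases are placeholders, and the fragments are assumed to merge into a single container entry. + +```jsonnet +local podSpec = (import 'generated-lib/main.libsonnet').example.spec.deploymentTemplate.spec.template.spec;  // placeholder import/root + +// Warm up after start, drain briefly before stop. Lifecycle hooks are rejected +// on regular init containers, so this entry is marked restartable ("Always"). +local hooks = +  podSpec.initContainers.lifecycle.postStart.httpGet.withPath('/warmup') + +  podSpec.initContainers.lifecycle.postStart.httpGet.withPort(8080) + +  podSpec.initContainers.lifecycle.preStop.exec.withCommand(['sh', '-c', 'sleep 5']); + +podSpec.initContainers.withName('proxy') + +podSpec.initContainers.withImage('nginx:1.27') +  // example image +podSpec.initContainers.withRestartPolicy('Always') + +hooks +```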
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.sleep + +"Sleep represents the duration that the container should sleep before being terminated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.sleep.withSeconds + +```ts +withSeconds(seconds) +``` + +"Seconds is the number of seconds to sleep." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.tcpSocket + +"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." 
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.lifecycle.preStop.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe + +"Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.withFailureThreshold + +```ts +withFailureThreshold(failureThreshold) +``` + +"Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.withInitialDelaySeconds + +```ts +withInitialDelaySeconds(initialDelaySeconds) +``` + +"Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.withPeriodSeconds + +```ts +withPeriodSeconds(periodSeconds) +``` + +"How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.withSuccessThreshold + +```ts +withSuccessThreshold(successThreshold) +``` + +"Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.withTimeoutSeconds + +```ts +withTimeoutSeconds(timeoutSeconds) +``` + +"Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.exec + +"Exec specifies the action to take." 
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.grpc + +"GRPC specifies an action involving a GRPC port." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.grpc.withPort + +```ts +withPort(port) +``` + +"Port number of the gRPC service. Number must be in the range 1 to 65535." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.grpc.withService + +```ts +withService(service) +``` + +"Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.httpHeaders + +"Custom headers to set in the request. 
HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.tcpSocket + +"TCPSocket specifies an action involving a TCP port." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.livenessProbe.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.ports + +"List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.ports.withContainerPort + +```ts +withContainerPort(containerPort) +``` + +"Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.ports.withHostIP + +```ts +withHostIP(hostIP) +``` + +"What host IP to bind the external port to." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.ports.withHostPort + +```ts +withHostPort(hostPort) +``` + +"Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.ports.withName + +```ts +withName(name) +``` + +"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.ports.withProtocol + +```ts +withProtocol(protocol) +``` + +"Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe + +"Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.withFailureThreshold + +```ts +withFailureThreshold(failureThreshold) +``` + +"Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." 
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.withInitialDelaySeconds + +```ts +withInitialDelaySeconds(initialDelaySeconds) +``` + +"Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.withPeriodSeconds + +```ts +withPeriodSeconds(periodSeconds) +``` + +"How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.withSuccessThreshold + +```ts +withSuccessThreshold(successThreshold) +``` + +"Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.withTimeoutSeconds + +```ts +withTimeoutSeconds(timeoutSeconds) +``` + +"Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.grpc + +"GRPC specifies an action involving a GRPC port." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.grpc.withPort + +```ts +withPort(port) +``` + +"Port number of the gRPC service. Number must be in the range 1 to 65535." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.grpc.withService + +```ts +withService(service) +``` + +"Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.tcpSocket + +"TCPSocket specifies an action involving a TCP port." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.readinessProbe.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." 
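+ +**Example (illustrative, not generated):** liveness and readiness probes for a restartable init container, built from the probe functions above; the ports and paths are arbitrary, the import/root aliases are placeholders, and the fragments are assumed to merge into one container entry. + +```jsonnet +local podSpec = (import 'generated-lib/main.libsonnet').example.spec.deploymentTemplate.spec.template.spec;  // placeholder import/root + +local liveness = +  podSpec.initContainers.livenessProbe.httpGet.withPath('/healthz') + +  podSpec.initContainers.livenessProbe.httpGet.withPort(8080) + +  podSpec.initContainers.livenessProbe.withInitialDelaySeconds(5) + +  podSpec.initContainers.livenessProbe.withPeriodSeconds(10); + +local readiness = +  podSpec.initContainers.readinessProbe.tcpSocket.withPort(8080) + +  podSpec.initContainers.readinessProbe.withFailureThreshold(3); + +// Merge both probe fragments into one (sidecar-style) container entry. +liveness + readiness +```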
+ +## obj spec.deploymentTemplate.spec.template.spec.initContainers.resizePolicy + +"Resources resize policy for the container." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.resizePolicy.withResourceName + +```ts +withResourceName(resourceName) +``` + +"Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.resizePolicy.withRestartPolicy + +```ts +withRestartPolicy(restartPolicy) +``` + +"Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.resources + +"Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.resources.withClaims + +```ts +withClaims(claims) +``` + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.resources.withClaimsMixin + +```ts +withClaimsMixin(claims) +``` + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.resources.withLimits + +```ts +withLimits(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.resources.withLimitsMixin + +```ts +withLimitsMixin(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.resources.withRequests + +```ts +withRequests(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.resources.withRequestsMixin + +```ts +withRequestsMixin(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. 
Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.resources.claims + +"Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.resources.claims.withName + +```ts +withName(name) +``` + +"Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext + +"SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.withAllowPrivilegeEscalation + +```ts +withAllowPrivilegeEscalation(allowPrivilegeEscalation) +``` + +"AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.withPrivileged + +```ts +withPrivileged(privileged) +``` + +"Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.withProcMount + +```ts +withProcMount(procMount) +``` + +"procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.withReadOnlyRootFilesystem + +```ts +withReadOnlyRootFilesystem(readOnlyRootFilesystem) +``` + +"Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.withRunAsGroup + +```ts +withRunAsGroup(runAsGroup) +``` + +"The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." 
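+
+Pulling the resources helpers above together (a sketch under the same assumptions about the `lib` import): limits and requests are plain maps keyed by resource name.
+
+```jsonnet
+// Hypothetical import path; substitute your generated library's entry point.
+local lib = import 'main.libsonnet';
+local resources = lib.spec.deploymentTemplate.spec.template.spec.initContainers.resources;
+
+// Requests may not exceed limits, so keep the two maps consistent.
+resources.withRequests({ cpu: '100m', memory: '128Mi' })
++ resources.withLimits({ cpu: '500m', memory: '256Mi' })
+```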
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.withRunAsNonRoot + +```ts +withRunAsNonRoot(runAsNonRoot) +``` + +"Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.withRunAsUser + +```ts +withRunAsUser(runAsUser) +``` + +"The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.appArmorProfile + +"appArmorProfile is the AppArmor options to use by this container. If set, this profile\noverrides the pod's appArmorProfile.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.appArmorProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile loaded on the node that should be used.\nThe profile must be preconfigured on the node to work.\nMust match the loaded name of the profile.\nMust be set if and only if type is \"Localhost\"." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.appArmorProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of AppArmor profile will be applied.\nValid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.capabilities + +"The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.capabilities.withAdd + +```ts +withAdd(add) +``` + +"Added capabilities" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.capabilities.withAddMixin + +```ts +withAddMixin(add) +``` + +"Added capabilities" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.capabilities.withDrop + +```ts +withDrop(drop) +``` + +"Removed capabilities" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.capabilities.withDropMixin + +```ts +withDropMixin(drop) +``` + +"Removed capabilities" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seLinuxOptions + +"The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seLinuxOptions.withLevel + +```ts +withLevel(level) +``` + +"Level is SELinux level label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seLinuxOptions.withRole + +```ts +withRole(role) +``` + +"Role is a SELinux role label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seLinuxOptions.withType + +```ts +withType(type) +``` + +"Type is a SELinux type label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seLinuxOptions.withUser + +```ts +withUser(user) +``` + +"User is a SELinux user label that applies to the container." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seccompProfile + +"The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seccompProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.seccompProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.windowsOptions + +"The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.windowsOptions.withGmsaCredentialSpec + +```ts +withGmsaCredentialSpec(gmsaCredentialSpec) +``` + +"GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.windowsOptions.withGmsaCredentialSpecName + +```ts +withGmsaCredentialSpecName(gmsaCredentialSpecName) +``` + +"GMSACredentialSpecName is the name of the GMSA credential spec to use." 
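+
+A sketch of a locked-down container security context built from the helpers above (the `lib` import is an assumption; this is one common combination, not a recommendation baked into the library):
+
+```jsonnet
+// Hypothetical import path; substitute your generated library's entry point.
+local lib = import 'main.libsonnet';
+local sc = lib.spec.deploymentTemplate.spec.template.spec.initContainers.securityContext;
+
+// No privilege escalation, read-only root filesystem, a non-root UID,
+// all capabilities dropped, runtime-default seccomp.
+sc.withAllowPrivilegeEscalation(false)
++ sc.withReadOnlyRootFilesystem(true)
++ sc.withRunAsNonRoot(true)
++ sc.withRunAsUser(65534)
++ sc.capabilities.withDrop(['ALL'])
++ sc.seccompProfile.withType('RuntimeDefault')
+```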
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.windowsOptions.withHostProcess + +```ts +withHostProcess(hostProcess) +``` + +"HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.securityContext.windowsOptions.withRunAsUserName + +```ts +withRunAsUserName(runAsUserName) +``` + +"The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe + +"StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.withFailureThreshold + +```ts +withFailureThreshold(failureThreshold) +``` + +"Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.withInitialDelaySeconds + +```ts +withInitialDelaySeconds(initialDelaySeconds) +``` + +"Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.withPeriodSeconds + +```ts +withPeriodSeconds(periodSeconds) +``` + +"How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.withSuccessThreshold + +```ts +withSuccessThreshold(successThreshold) +``` + +"Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.withTerminationGracePeriodSeconds + +```ts +withTerminationGracePeriodSeconds(terminationGracePeriodSeconds) +``` + +"Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. 
The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.withTimeoutSeconds + +```ts +withTimeoutSeconds(timeoutSeconds) +``` + +"Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.exec + +"Exec specifies the action to take." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.exec.withCommand + +```ts +withCommand(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.exec.withCommandMixin + +```ts +withCommandMixin(command) +``` + +"Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.grpc + +"GRPC specifies an action involving a GRPC port." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.grpc.withPort + +```ts +withPort(port) +``` + +"Port number of the gRPC service. Number must be in the range 1 to 65535." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.grpc.withService + +```ts +withService(service) +``` + +"Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet + +"HTTPGet specifies the http request to perform." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.withHost + +```ts +withHost(host) +``` + +"Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.withHttpHeaders + +```ts +withHttpHeaders(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.withHttpHeadersMixin + +```ts +withHttpHeadersMixin(httpHeaders) +``` + +"Custom headers to set in the request. HTTP allows repeated headers." 
+ +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.withPath + +```ts +withPath(path) +``` + +"Path to access on the HTTP server." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.withPort + +```ts +withPort(port) +``` + +"Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.withScheme + +```ts +withScheme(scheme) +``` + +"Scheme to use for connecting to the host.\nDefaults to HTTP." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.httpHeaders + +"Custom headers to set in the request. HTTP allows repeated headers." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.httpHeaders.withName + +```ts +withName(name) +``` + +"The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.httpGet.httpHeaders.withValue + +```ts +withValue(value) +``` + +"The header field value" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.tcpSocket + +"TCPSocket specifies an action involving a TCP port." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.tcpSocket.withHost + +```ts +withHost(host) +``` + +"Optional: Host name to connect to, defaults to the pod IP." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe.tcpSocket.withPort + +```ts +withPort(port) +``` + +"Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.volumeDevices + +"volumeDevices is the list of block devices to be used by the container." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.volumeDevices.withDevicePath + +```ts +withDevicePath(devicePath) +``` + +"devicePath is the path inside of the container that the device will be mapped to." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.volumeDevices.withName + +```ts +withName(name) +``` + +"name must match the name of a persistentVolumeClaim in the pod" + +## obj spec.deploymentTemplate.spec.template.spec.initContainers.volumeMounts + +"Pod volumes to mount into the container's filesystem.\nCannot be updated." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.volumeMounts.withMountPath + +```ts +withMountPath(mountPath) +``` + +"Path within the container at which the volume should be mounted. Must\nnot contain ':'." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.volumeMounts.withMountPropagation + +```ts +withMountPropagation(mountPropagation) +``` + +"mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.\nWhen RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified\n(which defaults to None)." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.volumeMounts.withName + +```ts +withName(name) +``` + +"This must match the Name of a Volume." 
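+
+Tying together the startup-probe helpers documented above (a minimal sketch; the `main.libsonnet` import and `lib` binding are assumptions about how this generated library is consumed):
+
+```jsonnet
+// Hypothetical import path; substitute your generated library's entry point.
+local lib = import 'main.libsonnet';
+local startup = lib.spec.deploymentTemplate.spec.template.spec.initContainers.startupProbe;
+
+// Give a slow-starting process up to 30 * 10s = 300s before the kubelet
+// treats startup as failed and restarts the container.
+startup.exec.withCommand(['cat', '/tmp/ready'])
++ startup.withFailureThreshold(30)
++ startup.withPeriodSeconds(10)
++ startup.withTimeoutSeconds(2)
+```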
+ +### fn spec.deploymentTemplate.spec.template.spec.initContainers.volumeMounts.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.volumeMounts.withRecursiveReadOnly + +```ts +withRecursiveReadOnly(recursiveReadOnly) +``` + +"RecursiveReadOnly specifies whether read-only mounts should be handled\nrecursively.\n\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made\nrecursively read-only. If this field is set to IfPossible, the mount is made\nrecursively read-only, if it is supported by the container runtime. If this\nfield is set to Enabled, the mount is made recursively read-only if it is\nsupported by the container runtime, otherwise the pod will not be started and\nan error will be generated to indicate the reason.\n\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to\nNone (or be unspecified, which defaults to None).\n\n\nIf this field is not specified, it is treated as an equivalent of Disabled." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.volumeMounts.withSubPath + +```ts +withSubPath(subPath) +``` + +"Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." + +### fn spec.deploymentTemplate.spec.template.spec.initContainers.volumeMounts.withSubPathExpr + +```ts +withSubPathExpr(subPathExpr) +``` + +"Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." + +## obj spec.deploymentTemplate.spec.template.spec.os + +"Specifies the OS of the containers in the pod.\nSome pod and container fields are restricted if this is set.\n\n\nIf the OS field is set to linux, the following fields must be unset:\n-securityContext.windowsOptions\n\n\nIf the OS field is set to windows, following fields must be unset:\n- spec.hostPID\n- spec.hostIPC\n- spec.hostUsers\n- spec.securityContext.appArmorProfile\n- spec.securityContext.seLinuxOptions\n- spec.securityContext.seccompProfile\n- spec.securityContext.fsGroup\n- spec.securityContext.fsGroupChangePolicy\n- spec.securityContext.sysctls\n- spec.shareProcessNamespace\n- spec.securityContext.runAsUser\n- spec.securityContext.runAsGroup\n- spec.securityContext.supplementalGroups\n- spec.containers[*].securityContext.appArmorProfile\n- spec.containers[*].securityContext.seLinuxOptions\n- spec.containers[*].securityContext.seccompProfile\n- spec.containers[*].securityContext.capabilities\n- spec.containers[*].securityContext.readOnlyRootFilesystem\n- spec.containers[*].securityContext.privileged\n- spec.containers[*].securityContext.allowPrivilegeEscalation\n- spec.containers[*].securityContext.procMount\n- spec.containers[*].securityContext.runAsUser\n- spec.containers[*].securityContext.runAsGroup" + +### fn spec.deploymentTemplate.spec.template.spec.os.withName + +```ts +withName(name) +``` + +"Name is the name of the operating system. 
The currently supported values are linux and windows.\nAdditional value may be defined in future and can be one of:\nhttps://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration\nClients should expect to handle additional values and treat unrecognized values in this field as os: null" + +## obj spec.deploymentTemplate.spec.template.spec.readinessGates + +"If specified, all readiness gates will be evaluated for pod readiness.\nA pod is ready when all its containers are ready AND\nall conditions specified in the readiness gates have status equal to \"True\"\nMore info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates" + +### fn spec.deploymentTemplate.spec.template.spec.readinessGates.withConditionType + +```ts +withConditionType(conditionType) +``` + +"ConditionType refers to a condition in the pod's condition list with matching type." + +## obj spec.deploymentTemplate.spec.template.spec.resourceClaims + +"ResourceClaims defines which ResourceClaims must be allocated\nand reserved before the Pod is allowed to start. The resources\nwill be made available to those containers which consume them\nby name.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable." + +### fn spec.deploymentTemplate.spec.template.spec.resourceClaims.withName + +```ts +withName(name) +``` + +"Name uniquely identifies this resource claim inside the pod.\nThis must be a DNS_LABEL." + +## obj spec.deploymentTemplate.spec.template.spec.resourceClaims.source + +"Source describes where to find the ResourceClaim." + +### fn spec.deploymentTemplate.spec.template.spec.resourceClaims.source.withResourceClaimName + +```ts +withResourceClaimName(resourceClaimName) +``` + +"ResourceClaimName is the name of a ResourceClaim object in the same\nnamespace as this pod." + +### fn spec.deploymentTemplate.spec.template.spec.resourceClaims.source.withResourceClaimTemplateName + +```ts +withResourceClaimTemplateName(resourceClaimTemplateName) +``` + +"ResourceClaimTemplateName is the name of a ResourceClaimTemplate\nobject in the same namespace as this pod.\n\n\nThe template will be used to create a new ResourceClaim, which will\nbe bound to this pod. When this pod is deleted, the ResourceClaim\nwill also be deleted. The pod name and resource name, along with a\ngenerated component, will be used to form a unique name for the\nResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\n\nThis field is immutable and no changes will be made to the\ncorresponding ResourceClaim by the control plane after creating the\nResourceClaim." + +## obj spec.deploymentTemplate.spec.template.spec.schedulingGates + +"SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\nIf schedulingGates is not empty, the pod will stay in the SchedulingGated state and the\nscheduler will not attempt to schedule the pod.\n\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards." + +### fn spec.deploymentTemplate.spec.template.spec.schedulingGates.withName + +```ts +withName(name) +``` + +"Name of the scheduling gate.\nEach scheduling gate must have a unique name field." + +## obj spec.deploymentTemplate.spec.template.spec.securityContext + +"SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field." 
+ +### fn spec.deploymentTemplate.spec.template.spec.securityContext.withFsGroup + +```ts +withFsGroup(fsGroup) +``` + +"A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:\n\n\n1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. The permission bits are OR'd with rw-rw----\n\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.withFsGroupChangePolicy + +```ts +withFsGroupChangePolicy(fsGroupChangePolicy) +``` + +"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume\nbefore being exposed inside Pod. This field will only apply to\nvolume types which support fsGroup based ownership(and permissions).\nIt will have no effect on ephemeral volume types such as: secret, configmaps\nand emptydir.\nValid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.withRunAsGroup + +```ts +withRunAsGroup(runAsGroup) +``` + +"The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.withRunAsNonRoot + +```ts +withRunAsNonRoot(runAsNonRoot) +``` + +"Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.withRunAsUser + +```ts +withRunAsUser(runAsUser) +``` + +"The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.withSupplementalGroups + +```ts +withSupplementalGroups(supplementalGroups) +``` + +"A list of groups applied to the first process run in each container, in addition\nto the container's primary GID, the fsGroup (if specified), and group memberships\ndefined in the container image for the uid of the container process. If unspecified,\nno additional groups are added to any container. Note that group memberships\ndefined in the container image for the uid of the container process are still effective,\neven if they are not included in this list.\nNote that this field cannot be set when spec.os.name is windows." 
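+
+A sketch combining the pod-level security context helpers above (the import path and `lib` name are assumptions):
+
+```jsonnet
+// Hypothetical import path; substitute your generated library's entry point.
+local lib = import 'main.libsonnet';
+local podSc = lib.spec.deploymentTemplate.spec.template.spec.securityContext;
+
+// Run the whole pod as a non-root user and only re-chown volume contents to
+// the fsGroup when the root of the volume does not already match.
+podSc.withRunAsNonRoot(true)
++ podSc.withRunAsUser(10001)
++ podSc.withRunAsGroup(10001)
++ podSc.withFsGroup(10001)
++ podSc.withFsGroupChangePolicy('OnRootMismatch')
+```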
+ +### fn spec.deploymentTemplate.spec.template.spec.securityContext.withSupplementalGroupsMixin + +```ts +withSupplementalGroupsMixin(supplementalGroups) +``` + +"A list of groups applied to the first process run in each container, in addition\nto the container's primary GID, the fsGroup (if specified), and group memberships\ndefined in the container image for the uid of the container process. If unspecified,\nno additional groups are added to any container. Note that group memberships\ndefined in the container image for the uid of the container process are still effective,\neven if they are not included in this list.\nNote that this field cannot be set when spec.os.name is windows." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.withSysctls + +```ts +withSysctls(sysctls) +``` + +"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.withSysctlsMixin + +```ts +withSysctlsMixin(sysctls) +``` + +"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.securityContext.appArmorProfile + +"appArmorProfile is the AppArmor options to use by the containers in this pod.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.appArmorProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile loaded on the node that should be used.\nThe profile must be preconfigured on the node to work.\nMust match the loaded name of the profile.\nMust be set if and only if type is \"Localhost\"." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.appArmorProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of AppArmor profile will be applied.\nValid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement." + +## obj spec.deploymentTemplate.spec.template.spec.securityContext.seLinuxOptions + +"The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.seLinuxOptions.withLevel + +```ts +withLevel(level) +``` + +"Level is SELinux level label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.seLinuxOptions.withRole + +```ts +withRole(role) +``` + +"Role is a SELinux role label that applies to the container." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.seLinuxOptions.withType + +```ts +withType(type) +``` + +"Type is a SELinux type label that applies to the container." 
+ +### fn spec.deploymentTemplate.spec.template.spec.securityContext.seLinuxOptions.withUser + +```ts +withUser(user) +``` + +"User is a SELinux user label that applies to the container." + +## obj spec.deploymentTemplate.spec.template.spec.securityContext.seccompProfile + +"The seccomp options to use by the containers in this pod.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.seccompProfile.withLocalhostProfile + +```ts +withLocalhostProfile(localhostProfile) +``` + +"localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.seccompProfile.withType + +```ts +withType(type) +``` + +"type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." + +## obj spec.deploymentTemplate.spec.template.spec.securityContext.sysctls + +"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.sysctls.withName + +```ts +withName(name) +``` + +"Name of a property to set" + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.sysctls.withValue + +```ts +withValue(value) +``` + +"Value of a property to set" + +## obj spec.deploymentTemplate.spec.template.spec.securityContext.windowsOptions + +"The Windows specific settings applied to all containers.\nIf unspecified, the options within a container's SecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.windowsOptions.withGmsaCredentialSpec + +```ts +withGmsaCredentialSpec(gmsaCredentialSpec) +``` + +"GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.windowsOptions.withGmsaCredentialSpecName + +```ts +withGmsaCredentialSpecName(gmsaCredentialSpecName) +``` + +"GMSACredentialSpecName is the name of the GMSA credential spec to use." + +### fn spec.deploymentTemplate.spec.template.spec.securityContext.windowsOptions.withHostProcess + +```ts +withHostProcess(hostProcess) +``` + +"HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." 
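+
+The seccomp and sysctl helpers above can be combined like this (a sketch; the `lib` import is an assumption):
+
+```jsonnet
+// Hypothetical import path; substitute your generated library's entry point.
+local lib = import 'main.libsonnet';
+local podSc = lib.spec.deploymentTemplate.spec.template.spec.securityContext;
+
+// Runtime-default seccomp plus a single namespaced sysctl; unsupported
+// sysctls can prevent the pod from launching, so keep this list minimal.
+podSc.seccompProfile.withType('RuntimeDefault')
++ podSc.withSysctls([
+  podSc.sysctls.withName('net.ipv4.ip_unprivileged_port_start')
+  + podSc.sysctls.withValue('0'),
+])
+```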
+ +### fn spec.deploymentTemplate.spec.template.spec.securityContext.windowsOptions.withRunAsUserName + +```ts +withRunAsUserName(runAsUserName) +``` + +"The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." + +## obj spec.deploymentTemplate.spec.template.spec.tolerations + +"If specified, the pod's tolerations." + +### fn spec.deploymentTemplate.spec.template.spec.tolerations.withEffect + +```ts +withEffect(effect) +``` + +"Effect indicates the taint effect to match. Empty means match all taint effects.\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." + +### fn spec.deploymentTemplate.spec.template.spec.tolerations.withKey + +```ts +withKey(key) +``` + +"Key is the taint key that the toleration applies to. Empty means match all taint keys.\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys." + +### fn spec.deploymentTemplate.spec.template.spec.tolerations.withOperator + +```ts +withOperator(operator) +``` + +"Operator represents a key's relationship to the value.\nValid operators are Exists and Equal. Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod can\ntolerate all taints of a particular category." + +### fn spec.deploymentTemplate.spec.template.spec.tolerations.withTolerationSeconds + +```ts +withTolerationSeconds(tolerationSeconds) +``` + +"TolerationSeconds represents the period of time the toleration (which must be\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\nit is not set, which means tolerate the taint forever (do not evict). Zero and\nnegative values will be treated as 0 (evict immediately) by the system." + +### fn spec.deploymentTemplate.spec.template.spec.tolerations.withValue + +```ts +withValue(value) +``` + +"Value is the taint value the toleration matches to.\nIf the operator is Exists, the value should be empty, otherwise just a regular string." + +## obj spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints + +"TopologySpreadConstraints describes how a group of pods ought to spread across topology\ndomains. Scheduler will schedule pods in a way which abides by the constraints.\nAll topologySpreadConstraints are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.withMatchLabelKeys + +```ts +withMatchLabelKeys(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select the pods over which\nspreading will be calculated. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are ANDed with labelSelector\nto select the group of existing pods over which spreading will be calculated\nfor the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nMatchLabelKeys cannot be set when LabelSelector isn't set.\nKeys that don't exist in the incoming pod labels will\nbe ignored. A null or empty list means only match against labelSelector.\n\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)." 
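+
+A sketch built from the toleration helpers above (the `lib` import is an assumption; the setter that attaches this list to the pod spec is not shown here):
+
+```jsonnet
+// Hypothetical import path; substitute your generated library's entry point.
+local lib = import 'main.libsonnet';
+local toleration = lib.spec.deploymentTemplate.spec.template.spec.tolerations;
+
+[
+  // Tolerate a dedicated-node taint.
+  toleration.withKey('dedicated')
+  + toleration.withOperator('Equal')
+  + toleration.withValue('monitoring')
+  + toleration.withEffect('NoSchedule'),
+
+  // Stay on a not-ready node for at most five minutes before eviction.
+  toleration.withKey('node.kubernetes.io/not-ready')
+  + toleration.withOperator('Exists')
+  + toleration.withEffect('NoExecute')
+  + toleration.withTolerationSeconds(300),
+]
+```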
+ +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.withMatchLabelKeysMixin + +```ts +withMatchLabelKeysMixin(matchLabelKeys) +``` + +"MatchLabelKeys is a set of pod label keys to select the pods over which\nspreading will be calculated. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are ANDed with labelSelector\nto select the group of existing pods over which spreading will be calculated\nfor the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nMatchLabelKeys cannot be set when LabelSelector isn't set.\nKeys that don't exist in the incoming pod labels will\nbe ignored. A null or empty list means only match against labelSelector.\n\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.withMaxSkew + +```ts +withMaxSkew(maxSkew) +``` + +"MaxSkew describes the degree to which pods may be unevenly distributed.\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\nbetween the number of matching pods in the target topology and the global minimum.\nThe global minimum is the minimum number of matching pods in an eligible domain\nor zero if the number of eligible domains is less than MinDomains.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 2/2/1:\nIn this case, the global minimum is 1.\n| zone1 | zone2 | zone3 |\n| P P | P P | P |\n- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;\nscheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)\nviolate MaxSkew(1).\n- if MaxSkew is 2, incoming pod can be scheduled onto any zone.\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\nto topologies that satisfy it.\nIt's a required field. Default value is 1 and 0 is not allowed." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.withMinDomains + +```ts +withMinDomains(minDomains) +``` + +"MinDomains indicates a minimum number of eligible domains.\nWhen the number of eligible domains with matching topology keys is less than minDomains,\nPod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed.\nAnd when the number of eligible domains with matching topology keys equals or greater than minDomains,\nthis value has no effect on scheduling.\nAs a result, when the number of eligible domains is less than minDomains,\nscheduler won't schedule more than maxSkew Pods to those domains.\nIf value is nil, the constraint behaves as if MinDomains is equal to 1.\nValid values are integers greater than 0.\nWhen value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same\nlabelSelector spread as 2/2/2:\n| zone1 | zone2 | zone3 |\n| P P | P P | P P |\nThe number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0.\nIn this situation, new pod with the same labelSelector cannot be scheduled,\nbecause computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,\nit will violate MaxSkew." 
+ +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.withNodeAffinityPolicy + +```ts +withNodeAffinityPolicy(nodeAffinityPolicy) +``` + +"NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector\nwhen calculating pod topology spread skew. Options are:\n- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.\n- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\n\nIf this value is nil, the behavior is equivalent to the Honor policy.\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.withNodeTaintsPolicy + +```ts +withNodeTaintsPolicy(nodeTaintsPolicy) +``` + +"NodeTaintsPolicy indicates how we will treat node taints when calculating\npod topology spread skew. Options are:\n- Honor: nodes without taints, along with tainted nodes for which the incoming pod\nhas a toleration, are included.\n- Ignore: node taints are ignored. All nodes are included.\n\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.withTopologyKey + +```ts +withTopologyKey(topologyKey) +``` + +"TopologyKey is the key of node labels. Nodes that have a label with this key\nand identical values are considered to be in the same topology.\nWe consider each as a \"bucket\", and try to put balanced number\nof pods into each bucket.\nWe define a domain as a particular instance of a topology.\nAlso, we define an eligible domain as a domain whose nodes meet the requirements of\nnodeAffinityPolicy and nodeTaintsPolicy.\ne.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology.\nAnd, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology.\nIt's a required field." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.withWhenUnsatisfiable + +```ts +withWhenUnsatisfiable(whenUnsatisfiable) +``` + +"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy\nthe spread constraint.\n- DoNotSchedule (default) tells the scheduler not to schedule it.\n- ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod\nif and only if every possible node assignment for that pod would violate\n\"MaxSkew\" on some topology.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 3/1/1:\n| zone1 | zone2 | zone3 |\n| P P P | P | P |\nIf WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled\nto zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies\nMaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler\nwon't make it *more* imbalanced.\nIt's a required field." + +## obj spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector + +"LabelSelector is used to find matching pods.\nPods that match this label selector are counted to determine the number of pods\nin their corresponding topology domain." 
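+
+A sketch combining the topology-spread helpers above with the labelSelector helpers documented next (the `lib` import is an assumption):
+
+```jsonnet
+// Hypothetical import path; substitute your generated library's entry point.
+local lib = import 'main.libsonnet';
+local constraint = lib.spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints;
+
+// Spread the selected pods across zones, allowing at most one pod of
+// difference between the most and least populated zone.
+constraint.withMaxSkew(1)
++ constraint.withTopologyKey('topology.kubernetes.io/zone')
++ constraint.withWhenUnsatisfiable('DoNotSchedule')
++ constraint.labelSelector.withMatchLabels({ app: 'example' })
+```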
+ +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.topologySpreadConstraints.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
+ +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes + +"List of volumes that can be mounted by containers belonging to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.withName + +```ts +withName(name) +``` + +"name of the volume.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.awsElasticBlockStore + +"awsElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.awsElasticBlockStore.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.awsElasticBlockStore.withPartition + +```ts +withPartition(partition) +``` + +"partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty)." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.awsElasticBlockStore.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly value true will force the readOnly setting in VolumeMounts.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.awsElasticBlockStore.withVolumeID + +```ts +withVolumeID(volumeID) +``` + +"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.azureDisk + +"azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.azureDisk.withCachingMode + +```ts +withCachingMode(cachingMode) +``` + +"cachingMode is the Host Caching mode: None, Read Only, Read Write." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.azureDisk.withDiskName + +```ts +withDiskName(diskName) +``` + +"diskName is the Name of the data disk in the blob storage" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.azureDisk.withDiskURI + +```ts +withDiskURI(diskURI) +``` + +"diskURI is the URI of data disk in the blob storage" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.azureDisk.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." 
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.azureDisk.withKind + +```ts +withKind(kind) +``` + +"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.azureDisk.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.azureFile + +"azureFile represents an Azure File Service mount on the host and bind mount to the pod." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.azureFile.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.azureFile.withSecretName + +```ts +withSecretName(secretName) +``` + +"secretName is the name of secret that contains Azure Storage Account Name and Key" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.azureFile.withShareName + +```ts +withShareName(shareName) +``` + +"shareName is the azure share Name" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.cephfs + +"cephFS represents a Ceph FS mount on the host that shares a pod's lifetime" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cephfs.withMonitors + +```ts +withMonitors(monitors) +``` + +"monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cephfs.withMonitorsMixin + +```ts +withMonitorsMixin(monitors) +``` + +"monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cephfs.withPath + +```ts +withPath(path) +``` + +"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cephfs.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cephfs.withSecretFile + +```ts +withSecretFile(secretFile) +``` + +"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cephfs.withUser + +```ts +withUser(user) +``` + +"user is optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.cephfs.secretRef + +"secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cephfs.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.cinder + +"cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cinder.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cinder.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cinder.withVolumeID + +```ts +withVolumeID(volumeID) +``` + +"volumeID used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.cinder.secretRef + +"secretRef is optional: points to a secret object containing parameters used to connect\nto OpenStack." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.cinder.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
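+
+How a volume entry pairs with the volumeMounts helpers earlier in this reference (a sketch; the `lib` import is an assumption, and `volumes.withName` still needs to be combined with one of the volume-source groups such as configMap or csi):
+
+```jsonnet
+// Hypothetical import path; substitute your generated library's entry point.
+local lib = import 'main.libsonnet';
+local podSpec = lib.spec.deploymentTemplate.spec.template.spec;
+
+// The volume and its mount are tied together only by the shared name.
+local scratchVolume = podSpec.volumes.withName('scratch');
+local scratchMount =
+  podSpec.initContainers.volumeMounts.withName('scratch')
+  + podSpec.initContainers.volumeMounts.withMountPath('/scratch')
+  + podSpec.initContainers.volumeMounts.withReadOnly(false);
+
+{ volume: scratchVolume, mount: scratchMount }
+```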
+ +## obj spec.deploymentTemplate.spec.template.spec.volumes.configMap + +"configMap represents a configMap that should populate this volume" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.configMap.withDefaultMode + +```ts +withDefaultMode(defaultMode) +``` + +"defaultMode is optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.configMap.withItems + +```ts +withItems(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.configMap.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.configMap.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.configMap.withOptional + +```ts +withOptional(optional) +``` + +"optional specify whether the ConfigMap or its keys must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.configMap.items + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.configMap.items.withKey + +```ts +withKey(key) +``` + +"key is the key to project." 
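+
+A similar sketch for the configMap volume, using the item setters documented here and just below (again, the import path is assumed and the names are placeholders):
+
+```jsonnet
+// Sketch only: import path and root object are assumptions.
+local lib = import 'main.libsonnet';
+local cm = lib.spec.deploymentTemplate.spec.template.spec.volumes.configMap;
+
+// Project one ConfigMap key to a custom path inside the volume.
+cm.withName('app-config')
++ cm.withOptional(false)
++ cm.withItems([
+  cm.items.withKey('config.yaml') + cm.items.withPath('config/config.yaml'),
+])
+```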
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.configMap.items.withMode + +```ts +withMode(mode) +``` + +"mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.configMap.items.withPath + +```ts +withPath(path) +``` + +"path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.csi + +"csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature)." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.csi.withDriver + +```ts +withDriver(driver) +``` + +"driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.csi.withFsType + +```ts +withFsType(fsType) +``` + +"fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.csi.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly specifies a read-only configuration for the volume.\nDefaults to false (read/write)." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.csi.withVolumeAttributes + +```ts +withVolumeAttributes(volumeAttributes) +``` + +"volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.csi.withVolumeAttributesMixin + +```ts +withVolumeAttributesMixin(volumeAttributes) +``` + +"volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes.csi.nodePublishSecretRef + +"nodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.csi.nodePublishSecretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI + +"downwardAPI represents downward API about the pod that should populate this volume" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.withDefaultMode + +```ts +withDefaultMode(defaultMode) +``` + +"Optional: mode bits to use on created files by default. 
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set."
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.withItems
+
+```ts
+withItems(items)
+```
+
+"Items is a list of downward API volume file"
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.withItemsMixin
+
+```ts
+withItemsMixin(items)
+```
+
+"Items is a list of downward API volume file"
+
+**Note:** This function appends passed data to existing values
+
+## obj spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items
+
+"Items is a list of downward API volume file"
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.withMode
+
+```ts
+withMode(mode)
+```
+
+"Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set."
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.withPath
+
+```ts
+withPath(path)
+```
+
+"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'"
+
+## obj spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.fieldRef
+
+"Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported."
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.fieldRef.withApiVersion
+
+```ts
+withApiVersion(apiVersion)
+```
+
+"Version of the schema the FieldPath is written in terms of, defaults to \"v1\"."
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.fieldRef.withFieldPath
+
+```ts
+withFieldPath(fieldPath)
+```
+
+"Path of the field to select in the specified API version."
+
+## obj spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.resourceFieldRef
+
+"Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported."
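+
+For example, the downwardAPI item setters above can expose pod metadata as files inside the volume (sketch only; the import path is assumed):
+
+```jsonnet
+// Sketch only: import path and root object are assumptions.
+local lib = import 'main.libsonnet';
+local dapi = lib.spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI;
+
+// Expose the pod's labels as a file named 'labels' inside the volume.
+dapi.withItems([
+  dapi.items.withPath('labels')
+  + dapi.items.fieldRef.withFieldPath('metadata.labels'),
+])
+```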
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.resourceFieldRef.withContainerName
+
+```ts
+withContainerName(containerName)
+```
+
+"Container name: required for volumes, optional for env vars"
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.resourceFieldRef.withDivisor
+
+```ts
+withDivisor(divisor)
+```
+
+"Specifies the output format of the exposed resources, defaults to \"1\""
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.downwardAPI.items.resourceFieldRef.withResource
+
+```ts
+withResource(resource)
+```
+
+"Required: resource to select"
+
+## obj spec.deploymentTemplate.spec.template.spec.volumes.emptyDir
+
+"emptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir"
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.emptyDir.withMedium
+
+```ts
+withMedium(medium)
+```
+
+"medium represents what type of storage medium should back this directory.\nThe default is \"\" which means to use the node's default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir"
+
+### fn spec.deploymentTemplate.spec.template.spec.volumes.emptyDir.withSizeLimit
+
+```ts
+withSizeLimit(sizeLimit)
+```
+
+"sizeLimit is the total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir"
+
+## obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral
+
+"ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same time."
+
+## obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate
+
+"Will be used to create a stand-alone PVC to provision the volume.\nThe pod in which this EphemeralVolumeSource is embedded will be the\nowner of the PVC, i.e. the PVC will be deleted together with the\npod. The name of the PVC will be `<pod name>-<volume name>` where\n`<volume name>` is the name from the `PodSpec.Volumes` array\nentry. 
Pod validation will reject the pod if the concatenated name\nis not valid for a PVC (for example, too long).\n\n\nAn existing PVC with that name that is not owned by the pod\nwill *not* be used for the pod to avoid using an unrelated\nvolume by mistake. Starting the pod is then blocked until\nthe unrelated PVC is removed. If such a pre-created PVC is\nmeant to be used by the pod, the PVC has to updated with an\nowner reference to the pod once the pod exists. Normally\nthis should not be necessary, but it may be useful when\nmanually reconstructing a broken cluster.\n\n\nThis field is read-only and no changes will be made by Kubernetes\nto the PVC after it has been created.\n\n\nRequired, must not be nil." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata + +"May contain labels and annotations that will be copied into the PVC\nwhen creating it. No other fields are allowed and will be rejected during\nvalidation." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + + + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + + + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + + + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + + + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata.withLabels + +```ts +withLabels(labels) +``` + + + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + + + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata.withName + +```ts +withName(name) +``` + + + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.metadata.withNamespace + +```ts +withNamespace(namespace) +``` + + + +## obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec + +"The specification for the PersistentVolumeClaim. The entire content is\ncopied unchanged into the PVC that gets created from this\ntemplate. The same fields as in a PersistentVolumeClaim\nare also valid here." 
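+
+A minimal sketch of the metadata setters, assuming the same hypothetical import as in the earlier examples (label and annotation values are placeholders):
+
+```jsonnet
+// Sketch only: import path and root object are assumptions.
+local lib = import 'main.libsonnet';
+local vct = lib.spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate;
+
+// Labels and annotations set here are copied onto the generated PVC.
+vct.metadata.withLabels({ 'app.kubernetes.io/component': 'storage' })
++ vct.metadata.withAnnotations({ 'example.com/owner': 'platform-team' })
+```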
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.withAccessModes + +```ts +withAccessModes(accessModes) +``` + +"accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.withAccessModesMixin + +```ts +withAccessModesMixin(accessModes) +``` + +"accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.withStorageClassName + +```ts +withStorageClassName(storageClassName) +``` + +"storageClassName is the name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.withVolumeAttributesClassName + +```ts +withVolumeAttributesClassName(volumeAttributesClassName) +``` + +"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.\nIf specified, the CSI driver will create or update the volume with the attributes defined\nin the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,\nit can be changed after the claim is created. An empty string value means that no VolumeAttributesClass\nwill be applied to the claim but it's not allowed to reset this field to empty string once it is set.\nIf unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass\nwill be set by the persistentvolume controller if it exists.\nIf the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be\nset to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource\nexists.\nMore info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/\n(Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.withVolumeMode + +```ts +withVolumeMode(volumeMode) +``` + +"volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.withVolumeName + +```ts +withVolumeName(volumeName) +``` + +"volumeName is the binding reference to the PersistentVolume backing this claim." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource + +"dataSource field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\nWhen the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,\nand dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.\nIf the namespace is specified, then dataSourceRef will not be copied to dataSource." 
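+
+Using the dataSource setters documented next, an ephemeral claim can be pre-populated from a snapshot (sketch; the import path and snapshot name are placeholders):
+
+```jsonnet
+// Sketch only: import path and root object are assumptions.
+local lib = import 'main.libsonnet';
+local spec = lib.spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec;
+
+// Pre-populate the ephemeral claim from an existing VolumeSnapshot.
+spec.dataSource.withApiGroup('snapshot.storage.k8s.io')
++ spec.dataSource.withKind('VolumeSnapshot')
++ spec.dataSource.withName('data-snap')
+```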
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource.withApiGroup + +```ts +withApiGroup(apiGroup) +``` + +"APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource.withKind + +```ts +withKind(kind) +``` + +"Kind is the type of resource being referenced" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSource.withName + +```ts +withName(name) +``` + +"Name is the name of resource being referenced" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef + +"dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\nvolume is desired. This may be any object from a non-empty API group (non\ncore object) or a PersistentVolumeClaim object.\nWhen this field is specified, volume binding will only succeed if the type of\nthe specified object matches some installed volume populator or dynamic\nprovisioner.\nThis field will replace the functionality of the dataSource field and as such\nif both fields are non-empty, they must have the same value. For backwards\ncompatibility, when namespace isn't specified in dataSourceRef,\nboth fields (dataSource and dataSourceRef) will be set to the same\nvalue automatically if one of them is empty and the other is non-empty.\nWhen namespace is specified in dataSourceRef,\ndataSource isn't set to the same value and must be empty.\nThere are three important differences between dataSource and dataSourceRef:\n* While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n(Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef.withApiGroup + +```ts +withApiGroup(apiGroup) +``` + +"APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required." 
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef.withKind + +```ts +withKind(kind) +``` + +"Kind is the type of resource being referenced" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef.withName + +```ts +withName(name) +``` + +"Name is the name of resource being referenced" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.dataSourceRef.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace is the namespace of resource being referenced\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.resources + +"resources represents the minimum resources the volume should have.\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\nthat are lower than previous value but must still be higher than capacity recorded in the\nstatus field of the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.resources.withLimits + +```ts +withLimits(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.resources.withLimitsMixin + +```ts +withLimitsMixin(limits) +``` + +"Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.resources.withRequests + +```ts +withRequests(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.resources.withRequestsMixin + +```ts +withRequestsMixin(requests) +``` + +"Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector + +"selector is a label query over volumes to consider for binding." 
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec.selector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes.fc + +"fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod." 
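+
+Taken together, the volumeClaimTemplate.spec setters documented above can describe a complete claim (sketch; storage class, size and selector labels are placeholders):
+
+```jsonnet
+// Sketch only: import path and root object are assumptions.
+local lib = import 'main.libsonnet';
+local spec = lib.spec.deploymentTemplate.spec.template.spec.volumes.ephemeral.volumeClaimTemplate.spec;
+
+// A small but complete claim spec: access mode, class, size and a selector.
+spec.withAccessModes(['ReadWriteOnce'])
++ spec.withStorageClassName('standard')
++ spec.resources.withRequests({ storage: '1Gi' })
++ spec.selector.withMatchLabels({ 'example.com/pool': 'fast' })
+```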
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.fc.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.fc.withLun + +```ts +withLun(lun) +``` + +"lun is Optional: FC target lun number" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.fc.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.fc.withTargetWWNs + +```ts +withTargetWWNs(targetWWNs) +``` + +"targetWWNs is Optional: FC target worldwide names (WWNs)" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.fc.withTargetWWNsMixin + +```ts +withTargetWWNsMixin(targetWWNs) +``` + +"targetWWNs is Optional: FC target worldwide names (WWNs)" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.fc.withWwids + +```ts +withWwids(wwids) +``` + +"wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.fc.withWwidsMixin + +```ts +withWwidsMixin(wwids) +``` + +"wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes.flexVolume + +"flexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.flexVolume.withDriver + +```ts +withDriver(driver) +``` + +"driver is the name of the driver to use for this volume." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.flexVolume.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.flexVolume.withOptions + +```ts +withOptions(options) +``` + +"options is Optional: this field holds extra command options if any." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.flexVolume.withOptionsMixin + +```ts +withOptionsMixin(options) +``` + +"options is Optional: this field holds extra command options if any." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.flexVolume.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly is Optional: defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.flexVolume.secretRef + +"secretRef is Optional: secretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. 
If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.flexVolume.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.flocker + +"flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.flocker.withDatasetName + +```ts +withDatasetName(datasetName) +``` + +"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker\nshould be considered as deprecated" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.flocker.withDatasetUUID + +```ts +withDatasetUUID(datasetUUID) +``` + +"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.gcePersistentDisk + +"gcePersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.gcePersistentDisk.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.gcePersistentDisk.withPartition + +```ts +withPartition(partition) +``` + +"partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.gcePersistentDisk.withPdName + +```ts +withPdName(pdName) +``` + +"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.gcePersistentDisk.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.gitRepo + +"gitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.gitRepo.withDirectory + +```ts +withDirectory(directory) +``` + +"directory is the target directory name.\nMust not contain or start with '..'. If '.' 
is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.gitRepo.withRepository + +```ts +withRepository(repository) +``` + +"repository is the URL" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.gitRepo.withRevision + +```ts +withRevision(revision) +``` + +"revision is the commit hash for the specified revision." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.glusterfs + +"glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.glusterfs.withEndpoints + +```ts +withEndpoints(endpoints) +``` + +"endpoints is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.glusterfs.withPath + +```ts +withPath(path) +``` + +"path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.glusterfs.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.hostPath + +"hostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write." 
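+
+The hostPath setters documented below would typically be used like this (sketch; the path is a placeholder and the import is assumed as before):
+
+```jsonnet
+// Sketch only: import path and root object are assumptions.
+local lib = import 'main.libsonnet';
+local vols = lib.spec.deploymentTemplate.spec.template.spec.volumes;
+
+// Mount a directory from the node; the 'Directory' type requires the path to exist.
+vols.hostPath.withPath('/var/log')
++ vols.hostPath.withType('Directory')
+```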
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.hostPath.withPath + +```ts +withPath(path) +``` + +"path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.hostPath.withType + +```ts +withType(type) +``` + +"type for HostPath Volume\nDefaults to \"\"\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.iscsi + +"iscsi represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withChapAuthDiscovery + +```ts +withChapAuthDiscovery(chapAuthDiscovery) +``` + +"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withChapAuthSession + +```ts +withChapAuthSession(chapAuthSession) +``` + +"chapAuthSession defines whether support iSCSI Session CHAP authentication" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withInitiatorName + +```ts +withInitiatorName(initiatorName) +``` + +"initiatorName is the custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n: will be created for the connection." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withIqn + +```ts +withIqn(iqn) +``` + +"iqn is the target iSCSI Qualified Name." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withIscsiInterface + +```ts +withIscsiInterface(iscsiInterface) +``` + +"iscsiInterface is the interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp)." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withLun + +```ts +withLun(lun) +``` + +"lun represents iSCSI Target Lun number." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withPortals + +```ts +withPortals(portals) +``` + +"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withPortalsMixin + +```ts +withPortalsMixin(portals) +``` + +"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false." 
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.withTargetPortal + +```ts +withTargetPortal(targetPortal) +``` + +"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.iscsi.secretRef + +"secretRef is the CHAP Secret for iSCSI target and initiator authentication" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.iscsi.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.nfs + +"nfs represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.nfs.withPath + +```ts +withPath(path) +``` + +"path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.nfs.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.nfs.withServer + +```ts +withServer(server) +``` + +"server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.persistentVolumeClaim + +"persistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.persistentVolumeClaim.withClaimName + +```ts +withClaimName(claimName) +``` + +"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.persistentVolumeClaim.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly Will force the ReadOnly setting in VolumeMounts.\nDefault false." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.photonPersistentDisk + +"photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.photonPersistentDisk.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." 
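+
+For the nfs and persistentVolumeClaim sources documented above, a sketch of two typical entries (server, export path and claim name are placeholders):
+
+```jsonnet
+// Sketch only: import path and root object are assumptions.
+local lib = import 'main.libsonnet';
+local vols = lib.spec.deploymentTemplate.spec.template.spec.volumes;
+
+// Two common entries: a read-only NFS mount and a reference to an existing PVC.
+[
+  vols.nfs.withServer('nfs.example.com')
+  + vols.nfs.withPath('/exports/data')
+  + vols.nfs.withReadOnly(true),
+
+  vols.persistentVolumeClaim.withClaimName('app-data'),
+]
+```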
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.photonPersistentDisk.withPdID + +```ts +withPdID(pdID) +``` + +"pdID is the ID that identifies Photon Controller persistent disk" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.portworxVolume + +"portworxVolume represents a portworx volume attached and mounted on kubelets host machine" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.portworxVolume.withFsType + +```ts +withFsType(fsType) +``` + +"fSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.portworxVolume.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.portworxVolume.withVolumeID + +```ts +withVolumeID(volumeID) +``` + +"volumeID uniquely identifies a Portworx volume" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected + +"projected items for all in one resources secrets, configmaps, and downward API" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.withDefaultMode + +```ts +withDefaultMode(defaultMode) +``` + +"defaultMode are the mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.withSources + +```ts +withSources(sources) +``` + +"sources is the list of volume projections" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.withSourcesMixin + +```ts +withSourcesMixin(sources) +``` + +"sources is the list of volume projections" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources + +"sources is the list of volume projections" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle + +"ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field\nof ClusterTrustBundle objects in an auto-updating file.\n\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\n\nClusterTrustBundle objects can either be selected by name, or by the\ncombination of signer name and a label selector.\n\n\nKubelet performs aggressive normalization of the PEM contents written\ninto the pod filesystem. Esoteric PEM features such as inter-block\ncomments and block headers are stripped. Certificates are deduplicated.\nThe ordering of certificates within the file is arbitrary, and Kubelet\nmay change the order over time." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.withName + +```ts +withName(name) +``` + +"Select a single ClusterTrustBundle by object name. Mutually-exclusive\nwith signerName and labelSelector." 
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.withOptional + +```ts +withOptional(optional) +``` + +"If true, don't block pod startup if the referenced ClusterTrustBundle(s)\naren't available. If using name, then the named ClusterTrustBundle is\nallowed not to exist. If using signerName, then the combination of\nsignerName and labelSelector is allowed to match zero\nClusterTrustBundles." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.withPath + +```ts +withPath(path) +``` + +"Relative path from the volume root to write the bundle." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.withSignerName + +```ts +withSignerName(signerName) +``` + +"Select all ClusterTrustBundles that match this signer name.\nMutually-exclusive with name. The contents of all selected\nClusterTrustBundles will be unified and deduplicated." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector + +"Select all ClusterTrustBundles that match this label selector. Only has\neffect if signerName is set. Mutually-exclusive with name. If unset,\ninterpreted as \"match nothing\". If set but empty, interpreted as \"match\neverything\"." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.withMatchExpressions + +```ts +withMatchExpressions(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.withMatchExpressionsMixin + +```ts +withMatchExpressionsMixin(matchExpressions) +``` + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.withMatchLabels + +```ts +withMatchLabels(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.withMatchLabelsMixin + +```ts +withMatchLabelsMixin(matchLabels) +``` + +"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions + +"matchExpressions is a list of label selector requirements. The requirements are ANDed." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.withKey + +```ts +withKey(key) +``` + +"key is the label key that the selector applies to." 
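+
+A sketch of the clusterTrustBundle projection above (signer name, file path and labels are placeholders; the import is assumed as in the other examples):
+
+```jsonnet
+// Sketch only: import path and root object are assumptions.
+local lib = import 'main.libsonnet';
+local ctb = lib.spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle;
+
+// Select bundles by signer name plus label selector and write them to one file.
+ctb.withSignerName('example.com/internal-ca')
++ ctb.withPath('internal-ca.pem')
++ ctb.labelSelector.withMatchLabels({ 'example.com/trust': 'internal' })
+```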
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.withOperator + +```ts +withOperator(operator) +``` + +"operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.withValues + +```ts +withValues(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.withValuesMixin + +```ts +withValuesMixin(values) +``` + +"values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap + +"configMap information about the configMap data to project" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap.withItems + +```ts +withItems(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap.withOptional + +```ts +withOptional(optional) +``` + +"optional specify whether the ConfigMap or its keys must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap.items + +"items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. 
If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap.items.withKey + +```ts +withKey(key) +``` + +"key is the key to project." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap.items.withMode + +```ts +withMode(mode) +``` + +"mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.configMap.items.withPath + +```ts +withPath(path) +``` + +"path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI + +"downwardAPI information about the downwardAPI data to project" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.withItems + +```ts +withItems(items) +``` + +"Items is a list of DownwardAPIVolume file" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"Items is a list of DownwardAPIVolume file" + +**Note:** This function appends passed data to existing values + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items + +"Items is a list of DownwardAPIVolume file" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.withMode + +```ts +withMode(mode) +``` + +"Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.withPath + +```ts +withPath(path) +``` + +"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.fieldRef + +"Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.fieldRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." 
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.fieldRef.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"Path of the field to select in the specified API version." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef + +"Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef.withContainerName + +```ts +withContainerName(containerName) +``` + +"Container name: required for volumes, optional for env vars" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef.withDivisor + +```ts +withDivisor(divisor) +``` + +"Specifies the output format of the exposed resources, defaults to \"1\"" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.downwardAPI.items.resourceFieldRef.withResource + +```ts +withResource(resource) +``` + +"Required: resource to select" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret + +"secret information about the secret data to project" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret.withItems + +```ts +withItems(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret.withOptional + +```ts +withOptional(optional) +``` + +"optional field specify whether the Secret or its key must be defined" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret.items + +"items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent.
If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret.items.withKey + +```ts +withKey(key) +``` + +"key is the key to project." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret.items.withMode + +```ts +withMode(mode) +``` + +"mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.secret.items.withPath + +```ts +withPath(path) +``` + +"path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.serviceAccountToken + +"serviceAccountToken is information about the serviceAccountToken data to project" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.serviceAccountToken.withAudience + +```ts +withAudience(audience) +``` + +"audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.serviceAccountToken.withExpirationSeconds + +```ts +withExpirationSeconds(expirationSeconds) +``` + +"expirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.projected.sources.serviceAccountToken.withPath + +```ts +withPath(path) +``` + +"path is the path relative to the mount point of the file to project the\ntoken into." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.quobyte + +"quobyte represents a Quobyte mount on the host that shares a pod's lifetime" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.quobyte.withGroup + +```ts +withGroup(group) +``` + +"group to map volume access to\nDefault is no group" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.quobyte.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false." 
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.quobyte.withRegistry + +```ts +withRegistry(registry) +``` + +"registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.quobyte.withTenant + +```ts +withTenant(tenant) +``` + +"tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.quobyte.withUser + +```ts +withUser(user) +``` + +"user to map volume access to\nDefaults to serviceaccount user" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.quobyte.withVolume + +```ts +withVolume(volume) +``` + +"volume is a string that references an already created Quobyte volume by name." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.rbd + +"rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.rbd.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.rbd.withImage + +```ts +withImage(image) +``` + +"image is the rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.rbd.withKeyring + +```ts +withKeyring(keyring) +``` + +"keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.rbd.withMonitors + +```ts +withMonitors(monitors) +``` + +"monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.rbd.withMonitorsMixin + +```ts +withMonitorsMixin(monitors) +``` + +"monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.rbd.withPool + +```ts +withPool(pool) +``` + +"pool is the rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.rbd.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.rbd.withUser + +```ts +withUser(user) +``` + +"user is the rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.rbd.secretRef + +"secretRef is name of the authentication secret
for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.rbd.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.scaleIO + +"scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\"." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.withGateway + +```ts +withGateway(gateway) +``` + +"gateway is the host address of the ScaleIO API Gateway." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.withProtectionDomain + +```ts +withProtectionDomain(protectionDomain) +``` + +"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.withSslEnabled + +```ts +withSslEnabled(sslEnabled) +``` + +"sslEnabled Flag enable/disable SSL communication with Gateway, default false" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.withStorageMode + +```ts +withStorageMode(storageMode) +``` + +"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.withStoragePool + +```ts +withStoragePool(storagePool) +``` + +"storagePool is the ScaleIO Storage Pool associated with the protection domain." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.withSystem + +```ts +withSystem(system) +``` + +"system is the name of the storage system as configured in ScaleIO." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.withVolumeName + +```ts +withVolumeName(volumeName) +``` + +"volumeName is the name of a volume already created in the ScaleIO system\nthat is associated with this volume source." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.secretRef + +"secretRef references to the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.scaleIO.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
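+
+The `secretRef` sub-objects above only expose `withName`; the rest of a volume is still assembled from the sibling builders. Below is a minimal usage sketch that wires an `rbd` volume to its authentication Secret on the runtime Deployment template. The import path, the `drc`/`podSpec`/`vol` locals, and the `new`, `withVolumes` and `withName` helpers on the pod spec and volume are assumptions based on the usual jsonnet-libs composition pattern, not something documented on this page.
+
+```jsonnet
+// Sketch only: adjust the import to however the generated library is vendored.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+
+local drc = crossplane.pkg.v1beta1.deploymentRuntimeConfig;
+local podSpec = drc.spec.deploymentTemplate.spec.template.spec;
+local vol = podSpec.volumes;  // the volume builders documented on this page
+
+drc.new('rbd-runtime')
+// withVolumes and vol.withName are assumed helpers; they are not listed in this section.
++ podSpec.withVolumes([
+  vol.withName('ceph-image')
+  + vol.rbd.withMonitors(['10.16.154.78:6789'])
+  + vol.rbd.withImage('foo')
+  + vol.rbd.withFsType('ext4')
+  + vol.rbd.secretRef.withName('ceph-secret'),
+])
+```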
+ +## obj spec.deploymentTemplate.spec.template.spec.volumes.secret + +"secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.secret.withDefaultMode + +```ts +withDefaultMode(defaultMode) +``` + +"defaultMode is Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.secret.withItems + +```ts +withItems(items) +``` + +"items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.secret.withItemsMixin + +```ts +withItemsMixin(items) +``` + +"items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +**Note:** This function appends passed data to existing values + +### fn spec.deploymentTemplate.spec.template.spec.volumes.secret.withOptional + +```ts +withOptional(optional) +``` + +"optional field specify whether the Secret or its keys must be defined" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.secret.withSecretName + +```ts +withSecretName(secretName) +``` + +"secretName is the name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.secret.items + +"items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.secret.items.withKey + +```ts +withKey(key) +``` + +"key is the key to project." 
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.secret.items.withMode + +```ts +withMode(mode) +``` + +"mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.secret.items.withPath + +```ts +withPath(path) +``` + +"path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.storageos + +"storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.storageos.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.storageos.withReadOnly + +```ts +withReadOnly(readOnly) +``` + +"readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.storageos.withVolumeName + +```ts +withVolumeName(volumeName) +``` + +"volumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.storageos.withVolumeNamespace + +```ts +withVolumeNamespace(volumeNamespace) +``` + +"volumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created." + +## obj spec.deploymentTemplate.spec.template.spec.volumes.storageos.secretRef + +"secretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.storageos.secretRef.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.deploymentTemplate.spec.template.spec.volumes.vsphereVolume + +"vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine" + +### fn spec.deploymentTemplate.spec.template.spec.volumes.vsphereVolume.withFsType + +```ts +withFsType(fsType) +``` + +"fsType is filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." 
+ +### fn spec.deploymentTemplate.spec.template.spec.volumes.vsphereVolume.withStoragePolicyID + +```ts +withStoragePolicyID(storagePolicyID) +``` + +"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.vsphereVolume.withStoragePolicyName + +```ts +withStoragePolicyName(storagePolicyName) +``` + +"storagePolicyName is the storage Policy Based Management (SPBM) profile name." + +### fn spec.deploymentTemplate.spec.template.spec.volumes.vsphereVolume.withVolumePath + +```ts +withVolumePath(volumePath) +``` + +"volumePath is the path that identifies vSphere volume vmdk" + +## obj spec.serviceAccountTemplate + +"ServiceAccountTemplate is the template for the ServiceAccount object." + +## obj spec.serviceAccountTemplate.metadata + +"Metadata contains the configurable metadata fields for the ServiceAccount." + +### fn spec.serviceAccountTemplate.metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that\nmay be set by external tools to store and retrieve arbitrary metadata.\nThey are not queryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/" + +### fn spec.serviceAccountTemplate.metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that\nmay be set by external tools to store and retrieve arbitrary metadata.\nThey are not queryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/" + +**Note:** This function appends passed data to existing values + +### fn spec.serviceAccountTemplate.metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. Labels will be merged with internal labels\nused by crossplane, and labels with a crossplane.io key might be\noverwritten.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.serviceAccountTemplate.metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. Labels will be merged with internal labels\nused by crossplane, and labels with a crossplane.io key might be\noverwritten.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.serviceAccountTemplate.metadata.withName + +```ts +withName(name) +``` + +"Name is the name of the object." + +## obj spec.serviceTemplate + +"ServiceTemplate is the template for the Service object." + +## obj spec.serviceTemplate.metadata + +"Metadata contains the configurable metadata fields for the Service."
+ +### fn spec.serviceTemplate.metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that\nmay be set by external tools to store and retrieve arbitrary metadata.\nThey are not queryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/" + +### fn spec.serviceTemplate.metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that\nmay be set by external tools to store and retrieve arbitrary metadata.\nThey are not queryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/" + +**Note:** This function appends passed data to existing values + +### fn spec.serviceTemplate.metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. Labels will be merged with internal labels\nused by crossplane, and labels with a crossplane.io key might be\noverwritten.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.serviceTemplate.metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. Labels will be merged with internal labels\nused by crossplane, and labels with a crossplane.io key might be\noverwritten.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.serviceTemplate.metadata.withName + +```ts +withName(name) +``` + +"Name is the name of the object." \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1beta1/function.md b/docs/crossplane/1.17/pkg/v1beta1/function.md new file mode 100644 index 0000000..715625c --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1beta1/function.md @@ -0,0 +1,364 @@ +--- +permalink: /crossplane/1.17/pkg/v1beta1/function/ +--- + +# pkg.v1beta1.function + +"A Function installs an OCI compatible Crossplane package, extending\nCrossplane with support for a new kind of composition function.\n\n\nRead the Crossplane documentation for\n[more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions)."
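+
+As a usage sketch, a Function object can be built from this library roughly as follows. The import path and the package reference are placeholders, and because `function` is a reserved word in Jsonnet the field is accessed with bracket syntax; treat the snippet as an assumption about the usual jsonnet-libs layout rather than a documented API.
+
+```jsonnet
+// Sketch only: adjust the import to however the generated library is vendored.
+local crossplane = import 'github.com/jsonnet-libs/crossplane-libsonnet/1.17/main.libsonnet';
+
+// 'function' is a Jsonnet keyword, so dot access (crossplane.pkg.v1beta1.function) would not parse.
+local fn = crossplane.pkg.v1beta1['function'];
+
+fn.new('function-patch-and-transform')
+// The package reference below is a placeholder, not a real registry path.
++ fn.spec.withPackage('xpkg.example.org/acme/function-patch-and-transform:v0.1.0')
++ fn.spec.withPackagePullPolicy('IfNotPresent')
+```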
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withCommonLabels(commonLabels)`](#fn-specwithcommonlabels) + * [`fn withCommonLabelsMixin(commonLabels)`](#fn-specwithcommonlabelsmixin) + * [`fn withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints)`](#fn-specwithignorecrossplaneconstraints) + * [`fn withPackage(package)`](#fn-specwithpackage) + * [`fn withPackagePullPolicy(packagePullPolicy)`](#fn-specwithpackagepullpolicy) + * [`fn withPackagePullSecrets(packagePullSecrets)`](#fn-specwithpackagepullsecrets) + * [`fn withPackagePullSecretsMixin(packagePullSecrets)`](#fn-specwithpackagepullsecretsmixin) + * [`fn withRevisionActivationPolicy(revisionActivationPolicy)`](#fn-specwithrevisionactivationpolicy) + * [`fn withRevisionHistoryLimit(revisionHistoryLimit)`](#fn-specwithrevisionhistorylimit) + * [`fn withSkipDependencyResolution(skipDependencyResolution)`](#fn-specwithskipdependencyresolution) + * [`obj spec.controllerConfigRef`](#obj-speccontrollerconfigref) + * [`fn withName(name)`](#fn-speccontrollerconfigrefwithname) + * [`obj spec.packagePullSecrets`](#obj-specpackagepullsecrets) + * [`fn withName(name)`](#fn-specpackagepullsecretswithname) + * [`obj spec.runtimeConfigRef`](#obj-specruntimeconfigref) + * [`fn withApiVersion(apiVersion)`](#fn-specruntimeconfigrefwithapiversion) + * [`fn withKind(kind)`](#fn-specruntimeconfigrefwithkind) + * [`fn withName(name)`](#fn-specruntimeconfigrefwithname) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Function + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"FunctionSpec specifies the configuration of a Function." + +### fn spec.withCommonLabels + +```ts +withCommonLabels(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.withCommonLabelsMixin + +```ts +withCommonLabelsMixin(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.withIgnoreCrossplaneConstraints + +```ts +withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints) +``` + +"IgnoreCrossplaneConstraints indicates to the package manager whether to\nhonor Crossplane version constraints specified by the package.\nDefault is false." + +### fn spec.withPackage + +```ts +withPackage(package) +``` + +"Package is the name of the package that is being requested." + +### fn spec.withPackagePullPolicy + +```ts +withPackagePullPolicy(packagePullPolicy) +``` + +"PackagePullPolicy defines the pull policy for the package.\nDefault is IfNotPresent." + +### fn spec.withPackagePullSecrets + +```ts +withPackagePullSecrets(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries."
+ +### fn spec.withPackagePullSecretsMixin + +```ts +withPackagePullSecretsMixin(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevisionActivationPolicy + +```ts +withRevisionActivationPolicy(revisionActivationPolicy) +``` + +"RevisionActivationPolicy specifies how the package controller should\nupdate from one revision to the next. Options are Automatic or Manual.\nDefault is Automatic." + +### fn spec.withRevisionHistoryLimit + +```ts +withRevisionHistoryLimit(revisionHistoryLimit) +``` + +"RevisionHistoryLimit dictates how the package controller cleans up old\ninactive package revisions.\nDefaults to 1. Can be disabled by explicitly setting to 0." + +### fn spec.withSkipDependencyResolution + +```ts +withSkipDependencyResolution(skipDependencyResolution) +``` + +"SkipDependencyResolution indicates to the package manager whether to skip\nresolving dependencies for a package. Setting this value to true may have\nunintended consequences.\nDefault is false." + +## obj spec.controllerConfigRef + +"ControllerConfigRef references a ControllerConfig resource that will be\nused to configure the packaged controller Deployment.\nDeprecated: Use RuntimeConfigReference instead." + +### fn spec.controllerConfigRef.withName + +```ts +withName(name) +``` + +"Name of the ControllerConfig." + +## obj spec.packagePullSecrets + +"PackagePullSecrets are named secrets in the same namespace that can be used\nto fetch packages from private registries." + +### fn spec.packagePullSecrets.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.runtimeConfigRef + +"RuntimeConfigRef references a RuntimeConfig resource that will be used\nto configure the package runtime." + +### fn spec.runtimeConfigRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"API version of the referent." + +### fn spec.runtimeConfigRef.withKind + +```ts +withKind(kind) +``` + +"Kind of the referent." + +### fn spec.runtimeConfigRef.withName + +```ts +withName(name) +``` + +"Name of the RuntimeConfig." \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1beta1/functionRevision.md b/docs/crossplane/1.17/pkg/v1beta1/functionRevision.md new file mode 100644 index 0000000..f1aa568 --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1beta1/functionRevision.md @@ -0,0 +1,382 @@ +--- +permalink: /crossplane/1.17/pkg/v1beta1/functionRevision/ +--- + +# pkg.v1beta1.functionRevision + +"A FunctionRevision represents a revision of a Function. Crossplane\ncreates new revisions when there are changes to the Function.\n\n\nCrossplane creates and manages FunctionRevisions. Don't directly edit\nFunctionRevisions." 
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj spec`](#obj-spec) + * [`fn withCommonLabels(commonLabels)`](#fn-specwithcommonlabels) + * [`fn withCommonLabelsMixin(commonLabels)`](#fn-specwithcommonlabelsmixin) + * [`fn withDesiredState(desiredState)`](#fn-specwithdesiredstate) + * [`fn withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints)`](#fn-specwithignorecrossplaneconstraints) + * [`fn withImage(image)`](#fn-specwithimage) + * [`fn withPackagePullPolicy(packagePullPolicy)`](#fn-specwithpackagepullpolicy) + * [`fn withPackagePullSecrets(packagePullSecrets)`](#fn-specwithpackagepullsecrets) + * [`fn withPackagePullSecretsMixin(packagePullSecrets)`](#fn-specwithpackagepullsecretsmixin) + * [`fn withRevision(revision)`](#fn-specwithrevision) + * [`fn withSkipDependencyResolution(skipDependencyResolution)`](#fn-specwithskipdependencyresolution) + * [`fn withTlsClientSecretName(tlsClientSecretName)`](#fn-specwithtlsclientsecretname) + * [`fn withTlsServerSecretName(tlsServerSecretName)`](#fn-specwithtlsserversecretname) + * [`obj spec.controllerConfigRef`](#obj-speccontrollerconfigref) + * [`fn withName(name)`](#fn-speccontrollerconfigrefwithname) + * [`obj spec.packagePullSecrets`](#obj-specpackagepullsecrets) + * [`fn withName(name)`](#fn-specpackagepullsecretswithname) + * [`obj spec.runtimeConfigRef`](#obj-specruntimeconfigref) + * [`fn withApiVersion(apiVersion)`](#fn-specruntimeconfigrefwithapiversion) + * [`fn withKind(kind)`](#fn-specruntimeconfigrefwithkind) + * [`fn withName(name)`](#fn-specruntimeconfigrefwithname) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of FunctionRevision + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj spec + +"FunctionRevisionSpec specifies configuration for a FunctionRevision." + +### fn spec.withCommonLabels + +```ts +withCommonLabels(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +### fn spec.withCommonLabelsMixin + +```ts +withCommonLabelsMixin(commonLabels) +``` + +"Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" + +**Note:** This function appends passed data to existing values + +### fn spec.withDesiredState + +```ts +withDesiredState(desiredState) +``` + +"DesiredState of the PackageRevision. Can be either Active or Inactive." + +### fn spec.withIgnoreCrossplaneConstraints + +```ts +withIgnoreCrossplaneConstraints(ignoreCrossplaneConstraints) +``` + +"IgnoreCrossplaneConstraints indicates to the package manager whether to\nhonor Crossplane version constraints specified by the package.\nDefault is false." + +### fn spec.withImage + +```ts +withImage(image) +``` + +"Package image used by install Pod to extract package contents." + +### fn spec.withPackagePullPolicy + +```ts +withPackagePullPolicy(packagePullPolicy) +``` + +"PackagePullPolicy defines the pull policy for the package. It is also\napplied to any images pulled for the package, such as a provider's\ncontroller image.\nDefault is IfNotPresent."
+ +### fn spec.withPackagePullSecrets + +```ts +withPackagePullSecrets(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +### fn spec.withPackagePullSecretsMixin + +```ts +withPackagePullSecretsMixin(packagePullSecrets) +``` + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +**Note:** This function appends passed data to existing values + +### fn spec.withRevision + +```ts +withRevision(revision) +``` + +"Revision number. Indicates when the revision will be garbage collected\nbased on the parent's RevisionHistoryLimit." + +### fn spec.withSkipDependencyResolution + +```ts +withSkipDependencyResolution(skipDependencyResolution) +``` + +"SkipDependencyResolution indicates to the package manager whether to skip\nresolving dependencies for a package. Setting this value to true may have\nunintended consequences.\nDefault is false." + +### fn spec.withTlsClientSecretName + +```ts +withTlsClientSecretName(tlsClientSecretName) +``` + +"TLSClientSecretName is the name of the TLS Secret that stores client\ncertificates of the Provider." + +### fn spec.withTlsServerSecretName + +```ts +withTlsServerSecretName(tlsServerSecretName) +``` + +"TLSServerSecretName is the name of the TLS Secret that stores server\ncertificates of the Provider." + +## obj spec.controllerConfigRef + +"ControllerConfigRef references a ControllerConfig resource that will be\nused to configure the packaged controller Deployment.\nDeprecated: Use RuntimeConfigReference instead." + +### fn spec.controllerConfigRef.withName + +```ts +withName(name) +``` + +"Name of the ControllerConfig." + +## obj spec.packagePullSecrets + +"PackagePullSecrets are named secrets in the same namespace that can be\nused to fetch packages from private registries. They are also applied to\nany images pulled for the package, such as a provider's controller image." + +### fn spec.packagePullSecrets.withName + +```ts +withName(name) +``` + +"Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" + +## obj spec.runtimeConfigRef + +"RuntimeConfigRef references a RuntimeConfig resource that will be used\nto configure the package runtime." + +### fn spec.runtimeConfigRef.withApiVersion + +```ts +withApiVersion(apiVersion) +``` + +"API version of the referent." + +### fn spec.runtimeConfigRef.withKind + +```ts +withKind(kind) +``` + +"Kind of the referent." + +### fn spec.runtimeConfigRef.withName + +```ts +withName(name) +``` + +"Name of the RuntimeConfig." 
\ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1beta1/index.md b/docs/crossplane/1.17/pkg/v1beta1/index.md new file mode 100644 index 0000000..ad3f075 --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1beta1/index.md @@ -0,0 +1,12 @@ +--- +permalink: /crossplane/1.17/pkg/v1beta1/ +--- + +# pkg.v1beta1 + + + +* [deploymentRuntimeConfig](deploymentRuntimeConfig.md) +* [function](function.md) +* [functionRevision](functionRevision.md) +* [lock](lock.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/pkg/v1beta1/lock.md b/docs/crossplane/1.17/pkg/v1beta1/lock.md new file mode 100644 index 0000000..97f961f --- /dev/null +++ b/docs/crossplane/1.17/pkg/v1beta1/lock.md @@ -0,0 +1,318 @@ +--- +permalink: /crossplane/1.17/pkg/v1beta1/lock/ +--- + +# pkg.v1beta1.lock + +"Lock is the CRD type that tracks package dependencies." + +## Index + +* [`fn new(name)`](#fn-new) +* [`fn withPackages(packages)`](#fn-withpackages) +* [`fn withPackagesMixin(packages)`](#fn-withpackagesmixin) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj packages`](#obj-packages) + * [`fn withDependencies(dependencies)`](#fn-packageswithdependencies) + * [`fn withDependenciesMixin(dependencies)`](#fn-packageswithdependenciesmixin) + * [`fn withName(name)`](#fn-packageswithname) + * [`fn withSource(source)`](#fn-packageswithsource) + * [`fn withType(type)`](#fn-packageswithtype) + * [`fn withVersion(version)`](#fn-packageswithversion) + * [`obj packages.dependencies`](#obj-packagesdependencies) + * [`fn withConstraints(constraints)`](#fn-packagesdependencieswithconstraints) + * [`fn withPackage(package)`](#fn-packagesdependencieswithpackage) + * [`fn withType(type)`](#fn-packagesdependencieswithtype) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Lock + +### fn withPackages + +```ts +withPackages(packages) +``` + + + +### fn withPackagesMixin + +```ts +withPackagesMixin(packages) +``` + + + +**Note:** This function appends passed data to existing values + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." 
+ +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj packages + + + +### fn packages.withDependencies + +```ts +withDependencies(dependencies) +``` + +"Dependencies are the list of dependencies of this package. The order of\nthe dependencies will dictate the order in which they are resolved." + +### fn packages.withDependenciesMixin + +```ts +withDependenciesMixin(dependencies) +``` + +"Dependencies are the list of dependencies of this package. The order of\nthe dependencies will dictate the order in which they are resolved." + +**Note:** This function appends passed data to existing values + +### fn packages.withName + +```ts +withName(name) +``` + +"Name corresponds to the name of the package revision for this package." + +### fn packages.withSource + +```ts +withSource(source) +``` + +"Source is the OCI image name without a tag or digest." + +### fn packages.withType + +```ts +withType(type) +``` + +"Type is the type of package. Can be either Configuration or Provider." + +### fn packages.withVersion + +```ts +withVersion(version) +``` + +"Version is the tag or digest of the OCI image." + +## obj packages.dependencies + +"Dependencies are the list of dependencies of this package. The order of\nthe dependencies will dictate the order in which they are resolved." + +### fn packages.dependencies.withConstraints + +```ts +withConstraints(constraints) +``` + +"Constraints is a valid semver range, which will be used to select a valid\ndependency version." 
+ +### fn packages.dependencies.withPackage + +```ts +withPackage(package) +``` + +"Package is the OCI image name without a tag or digest." + +### fn packages.dependencies.withType + +```ts +withType(type) +``` + +"Type is the type of package. Can be either Configuration or Provider." \ No newline at end of file diff --git a/docs/crossplane/1.17/util/connectionDetail.md b/docs/crossplane/1.17/util/connectionDetail.md new file mode 100644 index 0000000..ffe9149 --- /dev/null +++ b/docs/crossplane/1.17/util/connectionDetail.md @@ -0,0 +1,43 @@ +--- +permalink: /crossplane/1.17/util/connectionDetail/ +--- + +# util.connectionDetail + +Create connectionDetails for Compositions. + +## Index + +* [`fn fromConnectionSecretKey(key, name='-same as key-')`](#fn-fromconnectionsecretkey) +* [`fn fromFieldPath(key, name)`](#fn-fromfieldpath) +* [`fn fromValue(value, name)`](#fn-fromvalue) + +## Fields + +### fn fromConnectionSecretKey + +```ts +fromConnectionSecretKey(key, name='-same as key-') +``` + +Derive the XR's connection detail field `name` from the `key` of the composed +resource's connection secret. The argument `name` defaults to the value of `key`. + + +### fn fromFieldPath + +```ts +fromFieldPath(key, name) +``` + +Derive the XR's connection detail field `name` from the `key` field path of the +composed resource. + + +### fn fromValue + +```ts +fromValue(value, name) +``` + +Always sets the XR's connection detail field `name` to `value`. diff --git a/docs/crossplane/1.17/util/index.md b/docs/crossplane/1.17/util/index.md new file mode 100644 index 0000000..f334312 --- /dev/null +++ b/docs/crossplane/1.17/util/index.md @@ -0,0 +1,12 @@ +--- +permalink: /crossplane/1.17/util/ +--- + +# util + +A util library for Crossplane. + +* [connectionDetail](connectionDetail.md) +* [patch](patch.md) +* [resource](resource.md) +* [version](version.md) \ No newline at end of file diff --git a/docs/crossplane/1.17/util/patch.md b/docs/crossplane/1.17/util/patch.md new file mode 100644 index 0000000..81eb63c --- /dev/null +++ b/docs/crossplane/1.17/util/patch.md @@ -0,0 +1,305 @@ +--- +permalink: /crossplane/1.17/util/patch/ +--- + +# util.patch + +Create patches for Composition resources. 
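As an orientation aid before the field reference, here is a minimal, hypothetical sketch of how these patch helpers might be combined into a composed resource's patch list. The import path, field paths, and format string are illustrative assumptions (the exact path follows the vendoring pattern shown in the package READMEs); only the helper signatures come from this reference.

```jsonnet
// Minimal sketch, not a verbatim recipe: the import path and field paths are
// made-up examples; the helper signatures match the reference below.
local crossplane = import 'github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17/main.libsonnet';
local patch = crossplane.util.patch;

[
  // Copy an XR spec field onto the composed resource.
  patch.fromCompositeFieldPath('spec.parameters.size', 'spec.forProvider.size'),

  // Surface a composed resource status field on the XR.
  patch.toCompositeFieldPath('status.atProvider.hostname', 'status.hostname'),

  // Combine two XR fields into a single composed resource field.
  patch.combineFromComposite(
    'spec.forProvider.name',
    '%s-%s',
    ['spec.parameters.team', 'spec.parameters.app'],
  ),
]
```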
+ +## Index + +* [`fn combineFromComposite(toFieldPath, fmtString, fromFieldPaths)`](#fn-combinefromcomposite) +* [`fn combineToComposite(toFieldPath, fmtString, fromFieldPaths)`](#fn-combinetocomposite) +* [`fn fromCompositeFieldPath(from, to)`](#fn-fromcompositefieldpath) +* [`fn toCompositeFieldPath(from, to)`](#fn-tocompositefieldpath) +* [`obj policy`](#obj-policy) + * [`fn fromFieldPath(value)`](#fn-policyfromfieldpath) + * [`fn withMergeOptions(appendSlice, keepMapValues)`](#fn-policywithmergeoptions) +* [`obj transforms`](#obj-transforms) + * [`fn bool(true_value, false_value)`](#fn-transformsbool) + * [`fn clampMax(max)`](#fn-transformsclampmax) + * [`fn clampMin(min)`](#fn-transformsclampmin) + * [`fn convert(toType)`](#fn-transformsconvert) + * [`fn literalPattern(literal, result)`](#fn-transformsliteralpattern) + * [`fn map(map)`](#fn-transformsmap) + * [`fn match(patterns, fallbackValue, fallbackTo)`](#fn-transformsmatch) + * [`fn regexpPattern(regexp, result)`](#fn-transformsregexppattern) + * [`obj transforms.string`](#obj-transformsstring) + * [`fn convertFromBase64()`](#fn-transformsstringconvertfrombase64) + * [`fn convertToBase64()`](#fn-transformsstringconverttobase64) + * [`fn convertToJson()`](#fn-transformsstringconverttojson) + * [`fn convertToLower()`](#fn-transformsstringconverttolower) + * [`fn convertToSha1()`](#fn-transformsstringconverttosha1) + * [`fn convertToSha256()`](#fn-transformsstringconverttosha256) + * [`fn convertToSha512()`](#fn-transformsstringconverttosha512) + * [`fn convertToUpper()`](#fn-transformsstringconverttoupper) + * [`fn fmt(fmt)`](#fn-transformsstringfmt) + * [`fn regexp(match, group)`](#fn-transformsstringregexp) + * [`fn trimPrefix(trim)`](#fn-transformsstringtrimprefix) + * [`fn trimSuffix(trim)`](#fn-transformsstringtrimsuffix) + +## Fields + +### fn combineFromComposite + +```ts +combineFromComposite(toFieldPath, fmtString, fromFieldPaths) +``` + +This type patches from a combination of multiple fields within the XR +to a field within the composed resource. +It’s commonly used to expose a composed resource spec field as an XR spec field. + + +### fn combineToComposite + +```ts +combineToComposite(toFieldPath, fmtString, fromFieldPaths) +``` + +The inverse of CombineFromComposite. This type patches from multiple fields +within the composed resource to a field within the XR. +It’s commonly used to derive an XR status field from a combination of resource fields. + + +### fn fromCompositeFieldPath + +```ts +fromCompositeFieldPath(from, to) +``` + +This type patches from a field within the XR to a field within the composed +resource. It’s commonly used to expose a composed resource spec field as an XR +spec field. + + +### fn toCompositeFieldPath + +```ts +toCompositeFieldPath(from, to) +``` + +The inverse of FromCompositeFieldPath. This type patches from a field within the +composed resource to a field within the XR. It’s commonly used to derive an XR +status field from a composed resource status field. + + +## obj policy + + + +### fn policy.fromFieldPath + +```ts +fromFieldPath(value) +``` + +By default Crossplane will skip the patch until all of the variables to be +combined have values. Set the fromFieldPath policy to 'Required' to instead +abort composition and return an error if a variable has no value. + + +### fn policy.withMergeOptions + +```ts +withMergeOptions(appendSlice, keepMapValues) +``` + +You can patch entire objects or arrays from one resource to another.
By default +the 'to' object or array will be overwritten, not merged. Use the 'mergeOptions' +field to override this behaviour. Note that these fields accidentally leak Go +terminology - 'slice' means 'array'. 'map' means 'map' in YAML or 'object' in +JSON. + + +## obj transforms + + + +### fn transforms.bool + +```ts +bool(true_value, false_value) +``` + +Transform strings to booleans. +Example: `bool(true_value='Orphan', false_value='Delete')` + + +### fn transforms.clampMax + +```ts +clampMax(max) +``` + +Clamp a number to a maximum value. + + +### fn transforms.clampMin + +```ts +clampMin(min) +``` + +Clamp a number to a minimum value. + + +### fn transforms.convert + +```ts +convert(toType) +``` + +Convert a field to a different type. + + +### fn transforms.literalPattern + +```ts +literalPattern(literal, result) +``` + +Match a value against a literal, and return the result if the value matches. +To be used with the match transform. + + +### fn transforms.map + +```ts +map(map) +``` + +Use a Map to transform keys into values. + + +### fn transforms.match + +```ts +match(patterns, fallbackValue, fallbackTo) +``` + +Match a value to a list of patterns. +Use the literalPattern or regexpPattern function to create the patterns. +Return the fallbackValue or fallback to the input if no pattern matches. + + +### fn transforms.regexpPattern + +```ts +regexpPattern(regexp, result) +``` + +Match a value against a regexp, and return the result if the value matches. +To be used with the match transform. + + +## obj transforms.string + + + +### fn transforms.string.convertFromBase64 + +```ts +convertFromBase64() +``` + +Convert a base64 string to a string. + + +### fn transforms.string.convertToBase64 + +```ts +convertToBase64() +``` + +Convert a string to base64. + + +### fn transforms.string.convertToJson + +```ts +convertToJson() +``` + +Convert a string to JSON. + + +### fn transforms.string.convertToLower + +```ts +convertToLower() +``` + +Convert a string to lower case. + + +### fn transforms.string.convertToSha1 + +```ts +convertToSha1() +``` + +Convert a string to a SHA1 hash. + + +### fn transforms.string.convertToSha256 + +```ts +convertToSha256() +``` + +Convert a string to a SHA256 hash. + + +### fn transforms.string.convertToSha512 + +```ts +convertToSha512() +``` + +Convert a string to a SHA512 hash. + + +### fn transforms.string.convertToUpper + +```ts +convertToUpper() +``` + +Convert a string to upper case. + + +### fn transforms.string.fmt + +```ts +fmt(fmt) +``` + +Format a string. The format string is a Go format string. + + +### fn transforms.string.regexp + +```ts +regexp(match, group) +``` + +Match a regexp against a string. The group is optional and if omitted, the whole match is returned. + + +### fn transforms.string.trimPrefix + +```ts +trimPrefix(trim) +``` + +Trim a prefix from a string. + + +### fn transforms.string.trimSuffix + +```ts +trimSuffix(trim) +``` + +Trim a suffix from a string. diff --git a/docs/crossplane/1.17/util/resource.md b/docs/crossplane/1.17/util/resource.md new file mode 100644 index 0000000..e15c147 --- /dev/null +++ b/docs/crossplane/1.17/util/resource.md @@ -0,0 +1,98 @@ +--- +permalink: /crossplane/1.17/util/resource/ +--- + +# util.resource + +Create resources for Compositions. 
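Below is a hedged sketch of how a composed resource entry could be assembled with these helpers. The import paths, the provider-sql library, and the spec attribute being set are assumptions for illustration (the provider-sql path and the `withAttribute` call mirror the examples given under `fn new` and `fn withBaseMixin` below).

```jsonnet
// Illustrative sketch: import paths and provider-specific fields are
// assumptions; only the util.resource/util.patch helpers come from this library.
local crossplane = import 'github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17/main.libsonnet';
local resource = crossplane.util.resource;
local patch = crossplane.util.patch;

// Example resource library, as referenced under `fn new` below.
local database = (import 'provider-sql/0.4/main.libsonnet').mysql.v1alpha1.database;

resource.new('database', database)
+ resource.withBaseMixin(
  // Hypothetical spec attribute, mirroring the withBaseMixin example below.
  function(r) r.spec.withAttribute('value')
)
+ resource.withExternalNamePatch()
+ resource.withDeleteProtectionPatch()
+ resource.withPatchesMixin([
  patch.fromCompositeFieldPath('spec.parameters.storageGB', 'spec.forProvider.storageGB'),
])
```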
+ +## Index + +* [`fn new(name, resource)`](#fn-new) +* [`fn withBaseMixin(baseFunc)`](#fn-withbasemixin) +* [`fn withConnectionDetailsMixin(namespace, connectionDetails)`](#fn-withconnectiondetailsmixin) +* [`fn withConnectionSecretMixin(suffix, namespace)`](#fn-withconnectionsecretmixin) +* [`fn withDeleteProtectionPatch(default='Orphan')`](#fn-withdeleteprotectionpatch) +* [`fn withExternalNamePatch()`](#fn-withexternalnamepatch) +* [`fn withPatchesMixin(patches)`](#fn-withpatchesmixin) + +## Fields + +### fn new + +```ts +new(name, resource) +``` + +Create a new instance of a resource for a Composition, where `name` is a unique +name for the resource within the Composition resource list and `resource` is +the jsonnet library for this resource (for example: +`(import 'provider-sql/0.4/main.libsonnet').mysql.v1alpha1.database`) + + +### fn withBaseMixin + +```ts +withBaseMixin(baseFunc) +``` + +Extend the resource base; this expects a function with the resource library as its +argument: +``` ++ util.resource.withBaseMixin( + function(resource) resource.spec.withAttribute('value'), +) +``` + + +### fn withConnectionDetailsMixin + +```ts +withConnectionDetailsMixin(namespace, connectionDetails) +``` + +Add connectionDetails that will be provided by this resource. + +This also configures the writeConnectionSecretToRef to properly propagate the +connectionDetails. The `namespace` attribute conventionally matches the Crossplane +system namespace. + + +### fn withConnectionSecretMixin + +```ts +withConnectionSecretMixin(suffix, namespace) +``` + +withConnectionSecretMixin ensures connectionSecrets are propagated to the +management (i.e. crossplane) namespace; the name of the secret will be +<metadata.uid>-<suffix>. + + +### fn withDeleteProtectionPatch + +```ts +withDeleteProtectionPatch(default='Orphan') +``` + +withDeleteProtectionPatch provides a protection mechanism against unwanted removal +of resources; it defaults to keeping resources around. + + +### fn withExternalNamePatch + +```ts +withExternalNamePatch() +``` + +withExternalNamePatch is commonly used by providers to name the upstream +resource or to import existing resources. + + +### fn withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +Add patches that can be applied to this resource. diff --git a/docs/crossplane/1.17/util/version.md b/docs/crossplane/1.17/util/version.md new file mode 100644 index 0000000..6f1898e --- /dev/null +++ b/docs/crossplane/1.17/util/version.md @@ -0,0 +1,62 @@ +--- +permalink: /crossplane/1.17/util/version/ +--- + +# util.version + +Create versions for CompositeResourceDefinitions. + +## Index + +* [`fn new(version, served='true', referenceable='true')`](#fn-new) +* [`fn addParameterProperty(name, type='string|number|array|object', description='', required, mixin)`](#fn-addparameterproperty) +* [`fn withPropertiesMixin(properties)`](#fn-withpropertiesmixin) + +## Fields + +### fn new + +```ts +new(version, served='true', referenceable='true') +``` + +Create a new `version` (e.g. v1alpha1, v1beta1 and v1) schema for an XRD. + +`served` specifies that XRs should be served at this version. It can be set to +false to temporarily disable a version, for example to test whether doing so +breaks anything before a version is removed wholesale. + +`referenceable` denotes the version of a type of XR that Compositions may use. +Only one version may be referenceable.
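For example, a version with a small parameters schema might be built as follows. This is a hypothetical sketch: the import path, the property names, and the exact shape passed to `withPropertiesMixin` (documented below) are assumptions; only the function names and signatures come from this file.

```jsonnet
// Hypothetical sketch: the properties map shape and names are assumptions.
local crossplane = import 'github.com/jsonnet-libs/crossplane-core-libsonnet/crossplane/1.17/main.libsonnet';
local version = crossplane.util.version;

version.new('v1alpha1')
+ version.withPropertiesMixin({
  parameters: {
    type: 'object',
    properties: {
      size: { type: 'string', description: 'T-shirt size of the instance.' },
    },
  },
})
```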
+ + +### fn addParameterProperty + +```ts +addParameterProperty(name, type='string|number|array|object', description='', required, mixin) +``` + +Add properties to the Schema. + +This shortcut enables you to quickly extend the 'parameters' property of a version. + +Attributes: + +- `name` of the property +- `type` e.g. string, number, array, object +- `description` for documentation +- `required` is this a required property? +- `mixin` can be used to add enums or the type of an array member for validation. + + +### fn withPropertiesMixin + +```ts +withPropertiesMixin(properties) +``` + +Extend the Schema with properties. + +Schema is an OpenAPI schema just like the one used by Kubernetes CRDs. It +determines what fields your XR and claim will have. Note that Crossplane will +automatically extend it with some additional Crossplane machinery. diff --git a/docs/function-cel-filter/0.1/README.md b/docs/function-cel-filter/0.1/README.md new file mode 100644 index 0000000..04268fd --- /dev/null +++ b/docs/function-cel-filter/0.1/README.md @@ -0,0 +1,13 @@ +--- +permalink: /function-cel-filter/0.1/ +--- + +# function_cel_filter + +```jsonnet +local function_cel_filter = import "github.com/jsonnet-libs/crossplane-core-libsonnet/function-cel-filter/0.1/main.libsonnet" +``` + + + +* [cel](cel/index.md) \ No newline at end of file diff --git a/docs/function-cel-filter/0.1/cel/index.md b/docs/function-cel-filter/0.1/cel/index.md new file mode 100644 index 0000000..d20ba82 --- /dev/null +++ b/docs/function-cel-filter/0.1/cel/index.md @@ -0,0 +1,9 @@ +--- +permalink: /function-cel-filter/0.1/cel/ +--- + +# cel + + + +* [v1beta1](v1beta1/index.md) \ No newline at end of file diff --git a/docs/function-cel-filter/0.1/cel/v1beta1/filters.md b/docs/function-cel-filter/0.1/cel/v1beta1/filters.md new file mode 100644 index 0000000..4a73b55 --- /dev/null +++ b/docs/function-cel-filter/0.1/cel/v1beta1/filters.md @@ -0,0 +1,248 @@ +--- +permalink: /function-cel-filter/0.1/cel/v1beta1/filters/ +--- + +# cel.v1beta1.filters + +"Filters can be used to filter desired composed resources."
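A hypothetical input object for this function could be assembled as follows. The access path through `main.libsonnet` mirrors the docs tree above, and the resource-name pattern and CEL expression are the examples given in the `withName`/`withExpression` documentation below; everything else is illustrative.

```jsonnet
// Sketch only: the name pattern and CEL expression are the documented examples.
local function_cel_filter = import 'github.com/jsonnet-libs/crossplane-core-libsonnet/function-cel-filter/0.1/main.libsonnet';
local filters = function_cel_filter.cel.v1beta1.filters;

filters.new('filter-widgets')
+ filters.withFilters([
  // Keep resources matching 'buck.*' only when the expression is true.
  filters.filters.withName('buck.*')
  + filters.filters.withExpression('observed.composite.resource.spec.widgets == 42'),
])
```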
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`fn withFilters(filters)`](#fn-withfilters) +* [`fn withFiltersMixin(filters)`](#fn-withfiltersmixin) +* [`obj filters`](#obj-filters) + * [`fn withExpression(expression)`](#fn-filterswithexpression) + * [`fn withName(name)`](#fn-filterswithname) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Filters + +### fn withFilters + +```ts +withFilters(filters) +``` + +"Filters to apply to the desired composed resources produced by previous\nfunctions in the pipeline. Each filter matches a desired composed\nresource by name. If the expression evaluates to true, the composed\nresource will be included. Desired composed resources that don't match\nany filter are always included." + +### fn withFiltersMixin + +```ts +withFiltersMixin(filters) +``` + +"Filters to apply to the desired composed resources produced by previous\nfunctions in the pipeline. Each filter matches a desired composed\nresource by name. If the expression evaluates to true, the composed\nresource will be included. Desired composed resources that don't match\nany filter are always included." + +**Note:** This function appends passed data to existing values + +## obj filters + +"Filters to apply to the desired composed resources produced by previous\nfunctions in the pipeline. Each filter matches a desired composed\nresource by name. If the expression evaluates to true, the composed\nresource will be included. Desired composed resources that don't match\nany filter are always included." + +### fn filters.withExpression + +```ts +withExpression(expression) +``` + +"Expression is a CEL expression. 
See https://github.com/google/cel-spec.\nThe following top-level variables are available to the expression:\n\n\n* observed\n* desired\n* context\n\n\nExample expressions:\n\n\n* observed.composite.resource.spec.widgets == 42\n* observed.resources['composed'].connection_details['user'] == b'admin'\n* desired.resources['composed'].resource.spec.widgets == 42\n\n\nSee the RunFunctionRequest protobuf message for schema details.\nhttps://buf.build/crossplane/crossplane/docs/main:apiextensions.fn.proto.v1beta1" + +### fn filters.withName + +```ts +withName(name) +``` + +"Name of the desired composed resource(s) this filter should match.\n\n\nUse regular expressions to match multiple resources. Expressions are\nautomatically prefixed with ^ and suffixed with $. For example 'buck.*'\nbecomes '^buck.*$'. See https://github.com/google/re2/wiki/Syntax." + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids" \ No newline at end of file diff --git a/docs/function-cel-filter/0.1/cel/v1beta1/index.md b/docs/function-cel-filter/0.1/cel/v1beta1/index.md new file mode 100644 index 0000000..dde4658 --- /dev/null +++ b/docs/function-cel-filter/0.1/cel/v1beta1/index.md @@ -0,0 +1,9 @@ +--- +permalink: /function-cel-filter/0.1/cel/v1beta1/ +--- + +# cel.v1beta1 + + + +* [filters](filters.md) \ No newline at end of file diff --git a/docs/function-patch-and-transform/0.7/README.md b/docs/function-patch-and-transform/0.7/README.md new file mode 100644 index 0000000..95c59f4 --- /dev/null +++ b/docs/function-patch-and-transform/0.7/README.md @@ -0,0 +1,13 @@ +--- +permalink: /function-patch-and-transform/0.7/ +--- + +# function_patch_and_transform + +```jsonnet +local function_patch_and_transform = import "github.com/jsonnet-libs/crossplane-core-libsonnet/function-patch-and-transform/0.7/main.libsonnet" +``` + + + +* [pt](pt/index.md) \ No newline at end of file diff --git a/docs/function-patch-and-transform/0.7/pt/index.md b/docs/function-patch-and-transform/0.7/pt/index.md new file mode 100644 index 0000000..6ff8582 --- /dev/null +++ b/docs/function-patch-and-transform/0.7/pt/index.md @@ -0,0 +1,9 @@ +--- +permalink: /function-patch-and-transform/0.7/pt/ +--- + +# pt + + + +* [v1beta1](v1beta1/index.md) \ No newline at end of file diff --git a/docs/function-patch-and-transform/0.7/pt/v1beta1/index.md b/docs/function-patch-and-transform/0.7/pt/v1beta1/index.md new file mode 100644 index 0000000..5ef95c9 --- /dev/null +++ b/docs/function-patch-and-transform/0.7/pt/v1beta1/index.md @@ -0,0 +1,9 @@ +--- +permalink: /function-patch-and-transform/0.7/pt/v1beta1/ +--- + +# pt.v1beta1 + + + +* [resources](resources.md) \ No newline at end of file diff --git a/docs/function-patch-and-transform/0.7/pt/v1beta1/resources.md b/docs/function-patch-and-transform/0.7/pt/v1beta1/resources.md new file mode 100644 index 0000000..9f2f6d1 --- /dev/null +++ b/docs/function-patch-and-transform/0.7/pt/v1beta1/resources.md @@ -0,0 +1,1772 @@ +--- +permalink: /function-patch-and-transform/0.7/pt/v1beta1/resources/ +--- + +# pt.v1beta1.resources + +"Resources specifies Patch & Transform resource templates." 
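As a quick orientation before the long field reference, here is a hedged sketch of a small input object for this function. The import path, the `Bucket` base object, and the field paths are illustrative assumptions; the builder functions themselves are all listed in the index below.

```jsonnet
// Illustrative sketch: the base object and field paths are made up; only the
// builder functions come from this library.
local function_pt = import 'github.com/jsonnet-libs/crossplane-core-libsonnet/function-patch-and-transform/0.7/main.libsonnet';
local resources = function_pt.pt.v1beta1.resources;

resources.new('patch-and-transform')
+ resources.withResources([
  resources.resources.withName('bucket')
  + resources.resources.withBase({
    apiVersion: 'example.org/v1alpha1',  // hypothetical managed resource
    kind: 'Bucket',
    spec: { forProvider: { region: 'eu-west-1' } },
  })
  + resources.resources.withPatches([
    // Copy the region from the XR onto the composed resource.
    resources.resources.patches.withType('FromCompositeFieldPath')
    + resources.resources.patches.withFromFieldPath('spec.parameters.region')
    + resources.resources.patches.withToFieldPath('spec.forProvider.region'),
  ]),
])
```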
+ +## Index + +* [`fn new(name)`](#fn-new) +* [`fn withPatchSets(patchSets)`](#fn-withpatchsets) +* [`fn withPatchSetsMixin(patchSets)`](#fn-withpatchsetsmixin) +* [`fn withResources(resources)`](#fn-withresources) +* [`fn withResourcesMixin(resources)`](#fn-withresourcesmixin) +* [`obj environment`](#obj-environment) + * [`fn withPatches(patches)`](#fn-environmentwithpatches) + * [`fn withPatchesMixin(patches)`](#fn-environmentwithpatchesmixin) + * [`obj environment.patches`](#obj-environmentpatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-environmentpatcheswithfromfieldpath) + * [`fn withToFieldPath(toFieldPath)`](#fn-environmentpatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-environmentpatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-environmentpatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-environmentpatcheswithtype) + * [`obj environment.patches.combine`](#obj-environmentpatchescombine) + * [`fn withStrategy(strategy)`](#fn-environmentpatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-environmentpatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-environmentpatchescombinewithvariablesmixin) + * [`obj environment.patches.combine.string`](#obj-environmentpatchescombinestring) + * [`fn withFmt(fmt)`](#fn-environmentpatchescombinestringwithfmt) + * [`obj environment.patches.combine.variables`](#obj-environmentpatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-environmentpatchescombinevariableswithfromfieldpath) + * [`obj environment.patches.policy`](#obj-environmentpatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-environmentpatchespolicywithfromfieldpath) + * [`fn withToFieldPath(toFieldPath)`](#fn-environmentpatchespolicywithtofieldpath) + * [`obj environment.patches.transforms`](#obj-environmentpatchestransforms) + * [`fn withMap(map)`](#fn-environmentpatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-environmentpatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-environmentpatchestransformswithtype) + * [`obj environment.patches.transforms.convert`](#obj-environmentpatchestransformsconvert) + * [`fn withFormat(format)`](#fn-environmentpatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-environmentpatchestransformsconvertwithtotype) + * [`obj environment.patches.transforms.match`](#obj-environmentpatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-environmentpatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-environmentpatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-environmentpatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-environmentpatchestransformsmatchwithpatternsmixin) + * [`obj environment.patches.transforms.match.patterns`](#obj-environmentpatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-environmentpatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-environmentpatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-environmentpatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-environmentpatchestransformsmatchpatternswithtype) + * [`obj environment.patches.transforms.math`](#obj-environmentpatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-environmentpatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-environmentpatchestransformsmathwithclampmin) + * [`fn 
withMultiply(multiply)`](#fn-environmentpatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-environmentpatchestransformsmathwithtype) + * [`obj environment.patches.transforms.string`](#obj-environmentpatchestransformsstring) + * [`fn withConvert(convert)`](#fn-environmentpatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-environmentpatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-environmentpatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-environmentpatchestransformsstringwithtype) + * [`obj environment.patches.transforms.string.join`](#obj-environmentpatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-environmentpatchestransformsstringjoinwithseparator) + * [`obj environment.patches.transforms.string.regexp`](#obj-environmentpatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-environmentpatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-environmentpatchestransformsstringregexpwithmatch) + * [`obj environment.patches.transforms.string.replace`](#obj-environmentpatchestransformsstringreplace) + * [`fn withReplace(replace)`](#fn-environmentpatchestransformsstringreplacewithreplace) + * [`fn withSearch(search)`](#fn-environmentpatchestransformsstringreplacewithsearch) +* [`obj metadata`](#obj-metadata) + * [`fn withAnnotations(annotations)`](#fn-metadatawithannotations) + * [`fn withAnnotationsMixin(annotations)`](#fn-metadatawithannotationsmixin) + * [`fn withClusterName(clusterName)`](#fn-metadatawithclustername) + * [`fn withCreationTimestamp(creationTimestamp)`](#fn-metadatawithcreationtimestamp) + * [`fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)`](#fn-metadatawithdeletiongraceperiodseconds) + * [`fn withDeletionTimestamp(deletionTimestamp)`](#fn-metadatawithdeletiontimestamp) + * [`fn withFinalizers(finalizers)`](#fn-metadatawithfinalizers) + * [`fn withFinalizersMixin(finalizers)`](#fn-metadatawithfinalizersmixin) + * [`fn withGenerateName(generateName)`](#fn-metadatawithgeneratename) + * [`fn withGeneration(generation)`](#fn-metadatawithgeneration) + * [`fn withLabels(labels)`](#fn-metadatawithlabels) + * [`fn withLabelsMixin(labels)`](#fn-metadatawithlabelsmixin) + * [`fn withName(name)`](#fn-metadatawithname) + * [`fn withNamespace(namespace)`](#fn-metadatawithnamespace) + * [`fn withOwnerReferences(ownerReferences)`](#fn-metadatawithownerreferences) + * [`fn withOwnerReferencesMixin(ownerReferences)`](#fn-metadatawithownerreferencesmixin) + * [`fn withResourceVersion(resourceVersion)`](#fn-metadatawithresourceversion) + * [`fn withSelfLink(selfLink)`](#fn-metadatawithselflink) + * [`fn withUid(uid)`](#fn-metadatawithuid) +* [`obj patchSets`](#obj-patchsets) + * [`fn withName(name)`](#fn-patchsetswithname) + * [`fn withPatches(patches)`](#fn-patchsetswithpatches) + * [`fn withPatchesMixin(patches)`](#fn-patchsetswithpatchesmixin) + * [`obj patchSets.patches`](#obj-patchsetspatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-patchsetspatcheswithfromfieldpath) + * [`fn withToFieldPath(toFieldPath)`](#fn-patchsetspatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-patchsetspatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-patchsetspatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-patchsetspatcheswithtype) + * [`obj patchSets.patches.combine`](#obj-patchsetspatchescombine) + * [`fn withStrategy(strategy)`](#fn-patchsetspatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-patchsetspatchescombinewithvariables) + 
* [`fn withVariablesMixin(variables)`](#fn-patchsetspatchescombinewithvariablesmixin) + * [`obj patchSets.patches.combine.string`](#obj-patchsetspatchescombinestring) + * [`fn withFmt(fmt)`](#fn-patchsetspatchescombinestringwithfmt) + * [`obj patchSets.patches.combine.variables`](#obj-patchsetspatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-patchsetspatchescombinevariableswithfromfieldpath) + * [`obj patchSets.patches.policy`](#obj-patchsetspatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-patchsetspatchespolicywithfromfieldpath) + * [`fn withToFieldPath(toFieldPath)`](#fn-patchsetspatchespolicywithtofieldpath) + * [`obj patchSets.patches.transforms`](#obj-patchsetspatchestransforms) + * [`fn withMap(map)`](#fn-patchsetspatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-patchsetspatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-patchsetspatchestransformswithtype) + * [`obj patchSets.patches.transforms.convert`](#obj-patchsetspatchestransformsconvert) + * [`fn withFormat(format)`](#fn-patchsetspatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-patchsetspatchestransformsconvertwithtotype) + * [`obj patchSets.patches.transforms.match`](#obj-patchsetspatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-patchsetspatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-patchsetspatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-patchsetspatchestransformsmatchwithpatterns) + * [`fn withPatternsMixin(patterns)`](#fn-patchsetspatchestransformsmatchwithpatternsmixin) + * [`obj patchSets.patches.transforms.match.patterns`](#obj-patchsetspatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-patchsetspatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-patchsetspatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-patchsetspatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-patchsetspatchestransformsmatchpatternswithtype) + * [`obj patchSets.patches.transforms.math`](#obj-patchsetspatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-patchsetspatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-patchsetspatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-patchsetspatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-patchsetspatchestransformsmathwithtype) + * [`obj patchSets.patches.transforms.string`](#obj-patchsetspatchestransformsstring) + * [`fn withConvert(convert)`](#fn-patchsetspatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-patchsetspatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-patchsetspatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-patchsetspatchestransformsstringwithtype) + * [`obj patchSets.patches.transforms.string.join`](#obj-patchsetspatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-patchsetspatchestransformsstringjoinwithseparator) + * [`obj patchSets.patches.transforms.string.regexp`](#obj-patchsetspatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-patchsetspatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-patchsetspatchestransformsstringregexpwithmatch) + * [`obj patchSets.patches.transforms.string.replace`](#obj-patchsetspatchestransformsstringreplace) + * [`fn withReplace(replace)`](#fn-patchsetspatchestransformsstringreplacewithreplace) + * [`fn 
withSearch(search)`](#fn-patchsetspatchestransformsstringreplacewithsearch) +* [`obj resources`](#obj-resources) + * [`fn withBase(base)`](#fn-resourceswithbase) + * [`fn withBaseMixin(base)`](#fn-resourceswithbasemixin) + * [`fn withConnectionDetails(connectionDetails)`](#fn-resourceswithconnectiondetails) + * [`fn withConnectionDetailsMixin(connectionDetails)`](#fn-resourceswithconnectiondetailsmixin) + * [`fn withName(name)`](#fn-resourceswithname) + * [`fn withPatches(patches)`](#fn-resourceswithpatches) + * [`fn withPatchesMixin(patches)`](#fn-resourceswithpatchesmixin) + * [`fn withReadinessChecks(readinessChecks)`](#fn-resourceswithreadinesschecks) + * [`fn withReadinessChecksMixin(readinessChecks)`](#fn-resourceswithreadinesschecksmixin) + * [`obj resources.connectionDetails`](#obj-resourcesconnectiondetails) + * [`fn withFromConnectionSecretKey(fromConnectionSecretKey)`](#fn-resourcesconnectiondetailswithfromconnectionsecretkey) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-resourcesconnectiondetailswithfromfieldpath) + * [`fn withName(name)`](#fn-resourcesconnectiondetailswithname) + * [`fn withType(type)`](#fn-resourcesconnectiondetailswithtype) + * [`fn withValue(value)`](#fn-resourcesconnectiondetailswithvalue) + * [`obj resources.patches`](#obj-resourcespatches) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-resourcespatcheswithfromfieldpath) + * [`fn withPatchSetName(patchSetName)`](#fn-resourcespatcheswithpatchsetname) + * [`fn withToFieldPath(toFieldPath)`](#fn-resourcespatcheswithtofieldpath) + * [`fn withTransforms(transforms)`](#fn-resourcespatcheswithtransforms) + * [`fn withTransformsMixin(transforms)`](#fn-resourcespatcheswithtransformsmixin) + * [`fn withType(type)`](#fn-resourcespatcheswithtype) + * [`obj resources.patches.combine`](#obj-resourcespatchescombine) + * [`fn withStrategy(strategy)`](#fn-resourcespatchescombinewithstrategy) + * [`fn withVariables(variables)`](#fn-resourcespatchescombinewithvariables) + * [`fn withVariablesMixin(variables)`](#fn-resourcespatchescombinewithvariablesmixin) + * [`obj resources.patches.combine.string`](#obj-resourcespatchescombinestring) + * [`fn withFmt(fmt)`](#fn-resourcespatchescombinestringwithfmt) + * [`obj resources.patches.combine.variables`](#obj-resourcespatchescombinevariables) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-resourcespatchescombinevariableswithfromfieldpath) + * [`obj resources.patches.policy`](#obj-resourcespatchespolicy) + * [`fn withFromFieldPath(fromFieldPath)`](#fn-resourcespatchespolicywithfromfieldpath) + * [`fn withToFieldPath(toFieldPath)`](#fn-resourcespatchespolicywithtofieldpath) + * [`obj resources.patches.transforms`](#obj-resourcespatchestransforms) + * [`fn withMap(map)`](#fn-resourcespatchestransformswithmap) + * [`fn withMapMixin(map)`](#fn-resourcespatchestransformswithmapmixin) + * [`fn withType(type)`](#fn-resourcespatchestransformswithtype) + * [`obj resources.patches.transforms.convert`](#obj-resourcespatchestransformsconvert) + * [`fn withFormat(format)`](#fn-resourcespatchestransformsconvertwithformat) + * [`fn withToType(toType)`](#fn-resourcespatchestransformsconvertwithtotype) + * [`obj resources.patches.transforms.match`](#obj-resourcespatchestransformsmatch) + * [`fn withFallbackTo(fallbackTo)`](#fn-resourcespatchestransformsmatchwithfallbackto) + * [`fn withFallbackValue(fallbackValue)`](#fn-resourcespatchestransformsmatchwithfallbackvalue) + * [`fn withPatterns(patterns)`](#fn-resourcespatchestransformsmatchwithpatterns) + * [`fn 
withPatternsMixin(patterns)`](#fn-resourcespatchestransformsmatchwithpatternsmixin) + * [`obj resources.patches.transforms.match.patterns`](#obj-resourcespatchestransformsmatchpatterns) + * [`fn withLiteral(literal)`](#fn-resourcespatchestransformsmatchpatternswithliteral) + * [`fn withRegexp(regexp)`](#fn-resourcespatchestransformsmatchpatternswithregexp) + * [`fn withResult(result)`](#fn-resourcespatchestransformsmatchpatternswithresult) + * [`fn withType(type)`](#fn-resourcespatchestransformsmatchpatternswithtype) + * [`obj resources.patches.transforms.math`](#obj-resourcespatchestransformsmath) + * [`fn withClampMax(clampMax)`](#fn-resourcespatchestransformsmathwithclampmax) + * [`fn withClampMin(clampMin)`](#fn-resourcespatchestransformsmathwithclampmin) + * [`fn withMultiply(multiply)`](#fn-resourcespatchestransformsmathwithmultiply) + * [`fn withType(type)`](#fn-resourcespatchestransformsmathwithtype) + * [`obj resources.patches.transforms.string`](#obj-resourcespatchestransformsstring) + * [`fn withConvert(convert)`](#fn-resourcespatchestransformsstringwithconvert) + * [`fn withFmt(fmt)`](#fn-resourcespatchestransformsstringwithfmt) + * [`fn withTrim(trim)`](#fn-resourcespatchestransformsstringwithtrim) + * [`fn withType(type)`](#fn-resourcespatchestransformsstringwithtype) + * [`obj resources.patches.transforms.string.join`](#obj-resourcespatchestransformsstringjoin) + * [`fn withSeparator(separator)`](#fn-resourcespatchestransformsstringjoinwithseparator) + * [`obj resources.patches.transforms.string.regexp`](#obj-resourcespatchestransformsstringregexp) + * [`fn withGroup(group)`](#fn-resourcespatchestransformsstringregexpwithgroup) + * [`fn withMatch(match)`](#fn-resourcespatchestransformsstringregexpwithmatch) + * [`obj resources.patches.transforms.string.replace`](#obj-resourcespatchestransformsstringreplace) + * [`fn withReplace(replace)`](#fn-resourcespatchestransformsstringreplacewithreplace) + * [`fn withSearch(search)`](#fn-resourcespatchestransformsstringreplacewithsearch) + * [`obj resources.readinessChecks`](#obj-resourcesreadinesschecks) + * [`fn withFieldPath(fieldPath)`](#fn-resourcesreadinesscheckswithfieldpath) + * [`fn withMatchInteger(matchInteger)`](#fn-resourcesreadinesscheckswithmatchinteger) + * [`fn withMatchString(matchString)`](#fn-resourcesreadinesscheckswithmatchstring) + * [`fn withType(type)`](#fn-resourcesreadinesscheckswithtype) + * [`obj resources.readinessChecks.matchCondition`](#obj-resourcesreadinesschecksmatchcondition) + * [`fn withType(type)`](#fn-resourcesreadinesschecksmatchconditionwithtype) + +## Fields + +### fn new + +```ts +new(name) +``` + +new returns an instance of Resources + +### fn withPatchSets + +```ts +withPatchSets(patchSets) +``` + +"PatchSets define a named set of patches that may be included by any\nresource. PatchSets cannot themselves refer to other PatchSets." + +### fn withPatchSetsMixin + +```ts +withPatchSetsMixin(patchSets) +``` + +"PatchSets define a named set of patches that may be included by any\nresource. PatchSets cannot themselves refer to other PatchSets." + +**Note:** This function appends passed data to existing values + +### fn withResources + +```ts +withResources(resources) +``` + +"Resources is a list of resource templates that will be used when a\ncomposite resource is created." + +### fn withResourcesMixin + +```ts +withResourcesMixin(resources) +``` + +"Resources is a list of resource templates that will be used when a\ncomposite resource is created." 
+ +**Note:** This function appends passed data to existing values + +## obj environment + +"Environment represents the Composition environment.\n\n\nTHIS IS AN ALPHA FIELD.\nDo not use it in production. It may be changed or removed without notice." + +### fn environment.withPatches + +```ts +withPatches(patches) +``` + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed. These patches are between the XR\nand the Environment. Either from the Environment to the XR, or vice\nversa." + +### fn environment.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed. These patches are between the XR\nand the Environment. Either from the Environment to the XR, or vice\nversa." + +**Note:** This function appends passed data to existing values + +## obj environment.patches + +"Patches is a list of environment patches that are executed before a\ncomposition's resources are composed. These patches are between the XR\nand the Environment. Either from the Environment to the XR, or vice\nversa." + +### fn environment.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath or\nToCompositeFieldPath." + +### fn environment.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn environment.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn environment.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn environment.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the Patch object." + +## obj environment.patches.combine + +"Combine is the patch configuration for a CombineFromComposite,\nCombineToComposite patch." + +### fn environment.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn environment.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn environment.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj environment.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn environment.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." 
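+
+As an illustration, the combine helpers above can be chained into a single environment patch. This is a minimal sketch: `lib` stands for the object documented on this page (the real import path is not shown in this excerpt), and the field paths and format string are made-up values.
+
+```jsonnet
+// Hypothetical binding; replace with the actual import of this library.
+local lib = import 'function-patch-and-transform/main.libsonnet';
+
+// One environment patch that combines two composite fields into a string.
+local regionStagePatch =
+  lib.environment.patches.withType('CombineFromComposite')
+  + lib.environment.patches.combine.withStrategy('string')
+  + lib.environment.patches.combine.string.withFmt('%s-%s')
+  + lib.environment.patches.combine.withVariables([
+    lib.environment.patches.combine.variables.withFromFieldPath('spec.parameters.region'),
+    lib.environment.patches.combine.variables.withFromFieldPath('spec.parameters.stage'),
+  ])
+  + lib.environment.patches.withToFieldPath('regionStage');
+
+regionStagePatch
+```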
+ +## obj environment.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn environment.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj environment.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn environment.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. Use 'Required' to prevent the creation of a\nnew composed resource until the required path exists." + +### fn environment.patches.policy.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath specifies how to patch to a field path. The default is\n'Replace', which means the patch will completely replace the target field,\nor create it if it does not exist. Use 'MergeObjects' to recursively merge the patch\nobject with the target object, while keeping target object keys, but overwriting any array values, or use\n'MergeObjectsAppendArrays' to recursively merge the patch object with the target object, while keeping\ntarget object keys and appending any array values to target array values, or use\n'ForceMergeObjects' to recursively merge the patch object with the target object, overwriting\nany target object keys, including array values, or use\n'ForceMergeObjectsAppendArrays' to recursively merge the patch object with the target object,\noverwriting target object keys, and appending any array values to target array values.\n'MergeObject' is deprecated, use 'MergeObjects' instead, which is functionally identical.\n'AppendArray' is deprecated, use 'ForceMergeObjectsAppendArrays' instead, which is functionally identical." + +## obj environment.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn environment.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn environment.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn environment.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj environment.patches.transforms.convert + +"Convert is used to cast the input into the given output type." + +### fn environment.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn environment.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj environment.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." 
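+
+For example, a match transform can turn an input value into one of several results, with a fallback when nothing matches. A sketch under the same assumptions as the earlier example (`lib` is a hypothetical binding for this library; the pattern and result values are illustrative):
+
+```jsonnet
+local lib = import 'function-patch-and-transform/main.libsonnet';  // hypothetical path
+
+// A transform entry intended for use inside withTransforms([...]).
+local sizeByStage =
+  lib.environment.patches.transforms.withType('match')
+  + lib.environment.patches.transforms.match.withFallbackValue('t3.small')
+  + lib.environment.patches.transforms.match.withPatterns([
+    lib.environment.patches.transforms.match.patterns.withType('literal')
+    + lib.environment.patches.transforms.match.patterns.withLiteral('prod')
+    + lib.environment.patches.transforms.match.patterns.withResult('m5.large'),
+  ]);
+
+sizeByStage
+```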
+ +### fn environment.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn environment.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn environment.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn environment.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj environment.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn environment.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn environment.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn environment.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn environment.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj environment.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn environment.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn environment.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." + +### fn environment.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn environment.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj environment.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." 
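+
+String transforms are typically attached to a patch via `withTransforms`. A sketch, again with `lib` as a hypothetical binding for this library and made-up field paths; the `'Format'` value follows the Crossplane string-transform type enum:
+
+```jsonnet
+local lib = import 'function-patch-and-transform/main.libsonnet';  // hypothetical path
+
+// Copy a composite field and suffix it before writing it to the environment.
+local bucketNamePatch =
+  lib.environment.patches.withType('FromCompositeFieldPath')
+  + lib.environment.patches.withFromFieldPath('spec.parameters.name')
+  + lib.environment.patches.withToFieldPath('bucketName')
+  + lib.environment.patches.withTransforms([
+    lib.environment.patches.transforms.withType('string')
+    + lib.environment.patches.transforms.string.withType('Format')
+    + lib.environment.patches.transforms.string.withFmt('%s-bucket'),
+  ]);
+
+bucketNamePatch
+```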
+ +### fn environment.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON." + +### fn environment.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn environment.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn environment.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj environment.patches.transforms.string.join + +"Join the input strings." + +### fn environment.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator to join the input strings." + +## obj environment.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn environment.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn environment.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj environment.patches.transforms.string.replace + +"Search/Replace applied to the input string." + +### fn environment.patches.transforms.string.replace.withReplace + +```ts +withReplace(replace) +``` + +"The Replace string replaces all occurrences of the search string." + +### fn environment.patches.transforms.string.replace.withSearch + +```ts +withSearch(search) +``` + +"The Search string to match." + +## obj metadata + +"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." + +### fn metadata.withAnnotations + +```ts +withAnnotations(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +### fn metadata.withAnnotationsMixin + +```ts +withAnnotationsMixin(annotations) +``` + +"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + +**Note:** This function appends passed data to existing values + +### fn metadata.withClusterName + +```ts +withClusterName(clusterName) +``` + +"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." + +### fn metadata.withCreationTimestamp + +```ts +withCreationTimestamp(creationTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withDeletionGracePeriodSeconds + +```ts +withDeletionGracePeriodSeconds(deletionGracePeriodSeconds) +``` + +"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." + +### fn metadata.withDeletionTimestamp + +```ts +withDeletionTimestamp(deletionTimestamp) +``` + +"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers." + +### fn metadata.withFinalizers + +```ts +withFinalizers(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +### fn metadata.withFinalizersMixin + +```ts +withFinalizersMixin(finalizers) +``` + +"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list." + +**Note:** This function appends passed data to existing values + +### fn metadata.withGenerateName + +```ts +withGenerateName(generateName) +``` + +"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + +### fn metadata.withGeneration + +```ts +withGeneration(generation) +``` + +"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." + +### fn metadata.withLabels + +```ts +withLabels(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +### fn metadata.withLabelsMixin + +```ts +withLabelsMixin(labels) +``` + +"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + +**Note:** This function appends passed data to existing values + +### fn metadata.withName + +```ts +withName(name) +``` + +"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" + +### fn metadata.withNamespace + +```ts +withNamespace(namespace) +``` + +"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + +### fn metadata.withOwnerReferences + +```ts +withOwnerReferences(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +### fn metadata.withOwnerReferencesMixin + +```ts +withOwnerReferencesMixin(ownerReferences) +``` + +"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." + +**Note:** This function appends passed data to existing values + +### fn metadata.withResourceVersion + +```ts +withResourceVersion(resourceVersion) +``` + +"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + +### fn metadata.withSelfLink + +```ts +withSelfLink(selfLink) +``` + +"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release." + +### fn metadata.withUid + +```ts +withUid(uid) +``` + +"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + +## obj patchSets + +"PatchSets define a named set of patches that may be included by any\nresource. PatchSets cannot themselves refer to other PatchSets." + +### fn patchSets.withName + +```ts +withName(name) +``` + +"Name of this PatchSet." + +### fn patchSets.withPatches + +```ts +withPatches(patches) +``` + +"Patches will be applied as an overlay to the base resource." + +### fn patchSets.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches will be applied as an overlay to the base resource." + +**Note:** This function appends passed data to existing values + +## obj patchSets.patches + +"Patches will be applied as an overlay to the base resource." + +### fn patchSets.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath or\nToCompositeFieldPath." + +### fn patchSets.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn patchSets.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn patchSets.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn patchSets.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the ComposedPatch object." + +## obj patchSets.patches.combine + +"Combine is the patch configuration for a CombineFromComposite,\nCombineToComposite patch." + +### fn patchSets.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn patchSets.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." 
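+
+Taken together with the resource functions documented further below, a named patch set can be defined once and then included from a composed resource. A sketch (hypothetical `lib` binding; names and field paths are illustrative):
+
+```jsonnet
+local lib = import 'function-patch-and-transform/main.libsonnet';  // hypothetical path
+
+// A reusable, named set of patches.
+local commonPatches =
+  lib.patchSets.withName('common')
+  + lib.patchSets.withPatches([
+    lib.patchSets.patches.withType('FromCompositeFieldPath')
+    + lib.patchSets.patches.withFromFieldPath('spec.parameters.region')
+    + lib.patchSets.patches.withToFieldPath('spec.forProvider.region'),
+  ]);
+
+// A composed resource entry that pulls in the patch set by name.
+local bucket =
+  lib.resources.withName('bucket')
+  + lib.resources.withPatches([
+    lib.resources.patches.withType('PatchSet')
+    + lib.resources.patches.withPatchSetName('common'),
+  ]);
+
+local input =
+  lib.new('example')
+  + lib.withPatchSets([commonPatches])
+  + lib.withResources([bucket]);
+
+input
+```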
+ +### fn patchSets.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj patchSets.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn patchSets.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj patchSets.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn patchSets.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj patchSets.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn patchSets.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. Use 'Required' to prevent the creation of a\nnew composed resource until the required path exists." + +### fn patchSets.patches.policy.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath specifies how to patch to a field path. The default is\n'Replace', which means the patch will completely replace the target field,\nor create it if it does not exist. Use 'MergeObjects' to recursively merge the patch\nobject with the target object, while keeping target object keys, but overwriting any array values, or use\n'MergeObjectsAppendArrays' to recursively merge the patch object with the target object, while keeping\ntarget object keys and appending any array values to target array values, or use\n'ForceMergeObjects' to recursively merge the patch object with the target object, overwriting\nany target object keys, including array values, or use\n'ForceMergeObjectsAppendArrays' to recursively merge the patch object with the target object,\noverwriting target object keys, and appending any array values to target array values.\n'MergeObject' is deprecated, use 'MergeObjects' instead, which is functionally identical.\n'AppendArray' is deprecated, use 'ForceMergeObjectsAppendArrays' instead, which is functionally identical." + +## obj patchSets.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn patchSets.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn patchSets.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn patchSets.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj patchSets.patches.transforms.convert + +"Convert is used to cast the input into the given output type." 
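+
+A convert transform casts the patched value to another type; the `quantity` format applies to string-to-float64 conversions, as described below. A sketch (hypothetical `lib` binding):
+
+```jsonnet
+local lib = import 'function-patch-and-transform/main.libsonnet';  // hypothetical path
+
+// Parse a Kubernetes quantity string (e.g. "500Mi") into a float64.
+local toFloat =
+  lib.patchSets.patches.transforms.withType('convert')
+  + lib.patchSets.patches.transforms.convert.withToType('float64')
+  + lib.patchSets.patches.transforms.convert.withFormat('quantity');
+
+toFloat
+```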
+ +### fn patchSets.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn patchSets.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj patchSets.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn patchSets.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn patchSets.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn patchSets.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn patchSets.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj patchSets.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn patchSets.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn patchSets.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn patchSets.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn patchSets.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj patchSets.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn patchSets.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn patchSets.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." 
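+
+Math transforms operate on numeric inputs, for example multiplying a requested size before it is patched onto the composed resource. A sketch (hypothetical `lib` binding, illustrative values):
+
+```jsonnet
+local lib = import 'function-patch-and-transform/main.libsonnet';  // hypothetical path
+
+// Multiply the input value (e.g. GiB -> MiB) inside a patch's transform list.
+local toMebibytes =
+  lib.patchSets.patches.transforms.withType('math')
+  + lib.patchSets.patches.transforms.math.withMultiply(1024);
+
+toMebibytes
+```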
+ +### fn patchSets.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn patchSets.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj patchSets.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn patchSets.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON." + +### fn patchSets.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn patchSets.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn patchSets.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj patchSets.patches.transforms.string.join + +"Join the input strings." + +### fn patchSets.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator to join the input strings." + +## obj patchSets.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn patchSets.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn patchSets.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj patchSets.patches.transforms.string.replace + +"Search/Replace applied to the input string." + +### fn patchSets.patches.transforms.string.replace.withReplace + +```ts +withReplace(replace) +``` + +"The Replace string replaces all occurrences of the search string." + +### fn patchSets.patches.transforms.string.replace.withSearch + +```ts +withSearch(search) +``` + +"The Search string to match." + +## obj resources + +"Resources is a list of resource templates that will be used when a\ncomposite resource is created." + +### fn resources.withBase + +```ts +withBase(base) +``` + +"Base of the composed resource that patches will be applied to and from.\nIf base is omitted, a previous Function within the pipeline must have\nproduced the named composed resource. Patches will be applied to and from\nthat resource. If base is specified, and a previous Function within the\npipeline produced the name composed resource, it will be overwritten." + +### fn resources.withBaseMixin + +```ts +withBaseMixin(base) +``` + +"Base of the composed resource that patches will be applied to and from.\nIf base is omitted, a previous Function within the pipeline must have\nproduced the named composed resource. Patches will be applied to and from\nthat resource. If base is specified, and a previous Function within the\npipeline produced the name composed resource, it will be overwritten." 
+ +**Note:** This function appends passed data to existing values + +### fn resources.withConnectionDetails + +```ts +withConnectionDetails(connectionDetails) +``` + +"ConnectionDetails lists the propagation secret keys from this composed\nresource to the composition instance connection secret." + +### fn resources.withConnectionDetailsMixin + +```ts +withConnectionDetailsMixin(connectionDetails) +``` + +"ConnectionDetails lists the propagation secret keys from this composed\nresource to the composition instance connection secret." + +**Note:** This function appends passed data to existing values + +### fn resources.withName + +```ts +withName(name) +``` + +"A Name uniquely identifies this entry within its resources array." + +### fn resources.withPatches + +```ts +withPatches(patches) +``` + +"Patches to and from the composed resource." + +### fn resources.withPatchesMixin + +```ts +withPatchesMixin(patches) +``` + +"Patches to and from the composed resource." + +**Note:** This function appends passed data to existing values + +### fn resources.withReadinessChecks + +```ts +withReadinessChecks(readinessChecks) +``` + +"ReadinessChecks allows users to define custom readiness checks. All\nchecks have to return true in order for resource to be considered ready.\nThe default readiness check is to have the \"Ready\" condition to be\n\"True\"." + +### fn resources.withReadinessChecksMixin + +```ts +withReadinessChecksMixin(readinessChecks) +``` + +"ReadinessChecks allows users to define custom readiness checks. All\nchecks have to return true in order for resource to be considered ready.\nThe default readiness check is to have the \"Ready\" condition to be\n\"True\"." + +**Note:** This function appends passed data to existing values + +## obj resources.connectionDetails + +"ConnectionDetails lists the propagation secret keys from this composed\nresource to the composition instance connection secret." + +### fn resources.connectionDetails.withFromConnectionSecretKey + +```ts +withFromConnectionSecretKey(fromConnectionSecretKey) +``` + +"FromConnectionSecretKey is the key that will be used to fetch the value\nfrom the composed resource's connection secret." + +### fn resources.connectionDetails.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the composed resource whose\nvalue to be used as input. Name must be specified if the type is\nFromFieldPath." + +### fn resources.connectionDetails.withName + +```ts +withName(name) +``` + +"Name of the connection secret key that will be propagated to the\nconnection secret of the composed resource." + +### fn resources.connectionDetails.withType + +```ts +withType(type) +``` + +"Type sets the connection detail fetching behavior to be used. Each\nconnection detail type may require its own fields to be set on the\nConnectionDetail object." + +### fn resources.connectionDetails.withValue + +```ts +withValue(value) +``` + +"Value that will be propagated to the connection secret of the composite\nresource. May be set to inject a fixed, non-sensitive connection secret\nvalue, for example a well-known port." + +## obj resources.patches + +"Patches to and from the composed resource." + +### fn resources.patches.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the resource whose value is\nto be used as input. Required when type is FromCompositeFieldPath or\nToCompositeFieldPath." 
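+
+A typical resources entry combines a base manifest with patches from the composite. A sketch (hypothetical `lib` binding; the base object and field paths are made-up):
+
+```jsonnet
+local lib = import 'function-patch-and-transform/main.libsonnet';  // hypothetical path
+
+local instance =
+  lib.resources.withName('instance')
+  + lib.resources.withBase({
+    apiVersion: 'example.org/v1alpha1',  // hypothetical managed resource
+    kind: 'Instance',
+    spec: { forProvider: { engine: 'postgres' } },
+  })
+  + lib.resources.withPatches([
+    lib.resources.patches.withType('FromCompositeFieldPath')
+    + lib.resources.patches.withFromFieldPath('spec.parameters.size')
+    + lib.resources.patches.withToFieldPath('spec.forProvider.size'),
+  ]);
+
+instance
+```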
+ +### fn resources.patches.withPatchSetName + +```ts +withPatchSetName(patchSetName) +``` + +"PatchSetName to include patches from. Required when type is PatchSet." + +### fn resources.patches.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath is the path of the field on the resource whose value will\nbe changed with the result of transforms. Leave empty if you'd like to\npropagate to the same path as fromFieldPath." + +### fn resources.patches.withTransforms + +```ts +withTransforms(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn resources.patches.withTransformsMixin + +```ts +withTransformsMixin(transforms) +``` + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +**Note:** This function appends passed data to existing values + +### fn resources.patches.withType + +```ts +withType(type) +``` + +"Type sets the patching behaviour to be used. Each patch type may require\nits own fields to be set on the ComposedPatch object." + +## obj resources.patches.combine + +"Combine is the patch configuration for a CombineFromComposite,\nCombineToComposite patch." + +### fn resources.patches.combine.withStrategy + +```ts +withStrategy(strategy) +``` + +"Strategy defines the strategy to use to combine the input variable values.\nCurrently only string is supported." + +### fn resources.patches.combine.withVariables + +```ts +withVariables(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn resources.patches.combine.withVariablesMixin + +```ts +withVariablesMixin(variables) +``` + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +**Note:** This function appends passed data to existing values + +## obj resources.patches.combine.string + +"String declares that input variables should be combined into a single\nstring, using the relevant settings for formatting purposes." + +### fn resources.patches.combine.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +## obj resources.patches.combine.variables + +"Variables are the list of variables whose values will be retrieved and\ncombined." + +### fn resources.patches.combine.variables.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath is the path of the field on the source whose value is\nto be used as input." + +## obj resources.patches.policy + +"Policy configures the specifics of patching behaviour." + +### fn resources.patches.policy.withFromFieldPath + +```ts +withFromFieldPath(fromFieldPath) +``` + +"FromFieldPath specifies how to patch from a field path. The default is\n'Optional', which means the patch will be a no-op if the specified\nfromFieldPath does not exist. Use 'Required' to prevent the creation of a\nnew composed resource until the required path exists." + +### fn resources.patches.policy.withToFieldPath + +```ts +withToFieldPath(toFieldPath) +``` + +"ToFieldPath specifies how to patch to a field path. The default is\n'Replace', which means the patch will completely replace the target field,\nor create it if it does not exist. 
Use 'MergeObjects' to recursively merge the patch\nobject with the target object, while keeping target object keys, but overwriting any array values, or use\n'MergeObjectsAppendArrays' to recursively merge the patch object with the target object, while keeping\ntarget object keys and appending any array values to target array values, or use\n'ForceMergeObjects' to recursively merge the patch object with the target object, overwriting\nany target object keys, including array values, or use\n'ForceMergeObjectsAppendArrays' to recursively merge the patch object with the target object,\noverwriting target object keys, and appending any array values to target array values.\n'MergeObject' is deprecated, use 'MergeObjects' instead, which is functionally identical.\n'AppendArray' is deprecated, use 'ForceMergeObjectsAppendArrays' instead, which is functionally identical." + +## obj resources.patches.transforms + +"Transforms are the list of functions that are used as a FIFO pipe for the\ninput to be transformed." + +### fn resources.patches.transforms.withMap + +```ts +withMap(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +### fn resources.patches.transforms.withMapMixin + +```ts +withMapMixin(map) +``` + +"Map uses the input as a key in the given map and returns the value." + +**Note:** This function appends passed data to existing values + +### fn resources.patches.transforms.withType + +```ts +withType(type) +``` + +"Type of the transform to be run." + +## obj resources.patches.transforms.convert + +"Convert is used to cast the input into the given output type." + +### fn resources.patches.transforms.convert.withFormat + +```ts +withFormat(format) +``` + +"The expected input format.\n\n\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\nOnly used during `string -> float64` conversions.\n* `json` - parses the input as a JSON string.\nOnly used during `string -> object` or `string -> list` conversions.\n\n\nIf this property is null, the default conversion is applied." + +### fn resources.patches.transforms.convert.withToType + +```ts +withToType(toType) +``` + +"ToType is the type of the output of this transform." + +## obj resources.patches.transforms.match + +"Match is a more complex version of Map that matches a list of patterns." + +### fn resources.patches.transforms.match.withFallbackTo + +```ts +withFallbackTo(fallbackTo) +``` + +"Determines to what value the transform should fallback if no pattern matches." + +### fn resources.patches.transforms.match.withFallbackValue + +```ts +withFallbackValue(fallbackValue) +``` + +"The fallback value that should be returned by the transform if now pattern\nmatches." + +### fn resources.patches.transforms.match.withPatterns + +```ts +withPatterns(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +### fn resources.patches.transforms.match.withPatternsMixin + +```ts +withPatternsMixin(patterns) +``` + +"The patterns that should be tested against the input string.\nPatterns are tested in order. The value of the first match is used as\nresult of this transform." + +**Note:** This function appends passed data to existing values + +## obj resources.patches.transforms.match.patterns + +"The patterns that should be tested against the input string.\nPatterns are tested in order. 
The value of the first match is used as\nresult of this transform." + +### fn resources.patches.transforms.match.patterns.withLiteral + +```ts +withLiteral(literal) +``` + +"Literal exactly matches the input string (case sensitive).\nIs required if `type` is `literal`." + +### fn resources.patches.transforms.match.patterns.withRegexp + +```ts +withRegexp(regexp) +``` + +"Regexp to match against the input string.\nIs required if `type` is `regexp`." + +### fn resources.patches.transforms.match.patterns.withResult + +```ts +withResult(result) +``` + +"The value that is used as result of the transform if the pattern matches." + +### fn resources.patches.transforms.match.patterns.withType + +```ts +withType(type) +``` + +"Type specifies how the pattern matches the input.\n\n\n* `literal` - the pattern value has to exactly match (case sensitive) the\ninput string. This is the default.\n\n\n* `regexp` - the pattern treated as a regular expression against\nwhich the input string is tested. Crossplane will throw an error if the\nkey is not a valid regexp." + +## obj resources.patches.transforms.math + +"Math is used to transform the input via mathematical operations such as\nmultiplication." + +### fn resources.patches.transforms.math.withClampMax + +```ts +withClampMax(clampMax) +``` + +"ClampMax makes sure that the value is not bigger than the given value." + +### fn resources.patches.transforms.math.withClampMin + +```ts +withClampMin(clampMin) +``` + +"ClampMin makes sure that the value is not smaller than the given value." + +### fn resources.patches.transforms.math.withMultiply + +```ts +withMultiply(multiply) +``` + +"Multiply the value." + +### fn resources.patches.transforms.math.withType + +```ts +withType(type) +``` + +"Type of the math transform to be run." + +## obj resources.patches.transforms.string + +"String is used to transform the input into a string or a different kind\nof string. Note that the input does not necessarily need to be a string." + +### fn resources.patches.transforms.string.withConvert + +```ts +withConvert(convert) +``` + +"Optional conversion method to be specified.\n`ToUpper` and `ToLower` change the letter case of the input string.\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\n`ToJson` converts any input value into its raw JSON representation.\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\nconverted to JSON." + +### fn resources.patches.transforms.string.withFmt + +```ts +withFmt(fmt) +``` + +"Format the input using a Go format string. See\nhttps://golang.org/pkg/fmt/ for details." + +### fn resources.patches.transforms.string.withTrim + +```ts +withTrim(trim) +``` + +"Trim the prefix or suffix from the input" + +### fn resources.patches.transforms.string.withType + +```ts +withType(type) +``` + +"Type of the string transform to be run." + +## obj resources.patches.transforms.string.join + +"Join the input strings." + +### fn resources.patches.transforms.string.join.withSeparator + +```ts +withSeparator(separator) +``` + +"Separator to join the input strings." + +## obj resources.patches.transforms.string.regexp + +"Extract a match from the input using a regular expression." + +### fn resources.patches.transforms.string.regexp.withGroup + +```ts +withGroup(group) +``` + +"Group number to match. 0 (the default) matches the entire expression." + +### fn resources.patches.transforms.string.regexp.withMatch + +```ts +withMatch(match) +``` + +"Match string. 
May optionally include submatches, aka capture groups.\nSee https://pkg.go.dev/regexp/ for details." + +## obj resources.patches.transforms.string.replace + +"Search/Replace applied to the input string." + +### fn resources.patches.transforms.string.replace.withReplace + +```ts +withReplace(replace) +``` + +"The Replace string replaces all occurrences of the search string." + +### fn resources.patches.transforms.string.replace.withSearch + +```ts +withSearch(search) +``` + +"The Search string to match." + +## obj resources.readinessChecks + +"ReadinessChecks allows users to define custom readiness checks. All\nchecks have to return true in order for resource to be considered ready.\nThe default readiness check is to have the \"Ready\" condition to be\n\"True\"." + +### fn resources.readinessChecks.withFieldPath + +```ts +withFieldPath(fieldPath) +``` + +"FieldPath shows the path of the field whose value will be used." + +### fn resources.readinessChecks.withMatchInteger + +```ts +withMatchInteger(matchInteger) +``` + +"MatchInt is the value you'd like to match if you're using \"MatchInt\" type." + +### fn resources.readinessChecks.withMatchString + +```ts +withMatchString(matchString) +``` + +"MatchString is the value you'd like to match if you're using \"MatchString\" type." + +### fn resources.readinessChecks.withType + +```ts +withType(type) +``` + +"Type indicates the type of probe you'd like to use." + +## obj resources.readinessChecks.matchCondition + +"MatchCondition specifies the condition you'd like to match if you're using \"MatchCondition\" type." + +### fn resources.readinessChecks.matchCondition.withType + +```ts +withType(type) +``` + +"Type indicates the type of condition you'd like to use." \ No newline at end of file diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css new file mode 100644 index 0000000..ed54bbf --- /dev/null +++ b/docs/stylesheets/extra.css @@ -0,0 +1,48 @@ +/* Menu */ +/* Decapitalize menu items, Mkdocs capilizes by default and can't be configured */ +nav nav .md-nav__link .md-ellipsis::first-letter { text-transform: lowercase } + +/* TOC */ +/* Don't wrap TOC links with spaces */ +.md-sidebar--secondary .md-nav__item { white-space: nowrap } +/* Scroll horizontal to make them accessible */ +.md-sidebar--secondary .md-sidebar__scrollwrap { overflow-x: auto } + +/* Code */ +/* Make codeblocks stand out */ +.md-typeset pre>code { border-left: 0.2rem solid var(--md-accent-fg-color) } + +/* Headings */ +/* Hide headings for functions but don't remove them, they function as deeplink targets */ +h2[id^="fn-"], +h3[id^="fn-"], +h4[id^="fn-"], +h5[id^="fn-"], +h6[id^="fn-"], +h7[id^="fn-"], +h8[id^="fn-"], +h9[id^="fn-"] { + visibility: hidden; + width: 0; + height: 0; + padding: 0; + margin: 0; +} + +/* Parameters */ +/* Format 'PARAMETERS' after highlight (div.highlight) */ +/* Match first paragraph (p) but only if it is followed by an unsorted list (ul) */ +article.md-content__inner.md-typeset div.highlight+p:has(+ul) { + padding-left: 1em; + margin-top: 0; + margin-bottom: 0; +} +/* Match first unsorted list (ul) after paragraph (p) */ +article.md-content__inner.md-typeset div.highlight+p+ul { + padding-left: 1em; + margin-top:0; +} +article.md-content__inner.md-typeset div.highlight+p+ul li { + margin-top: 0; + margin-bottom: 0; +} diff --git a/function-cel-filter/0.1/_gen/cel/main.libsonnet b/function-cel-filter/0.1/_gen/cel/main.libsonnet new file mode 100644 index 0000000..d6663a2 --- /dev/null +++ 
b/function-cel-filter/0.1/_gen/cel/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='cel', url='', help=''), + v1beta1: (import 'v1beta1/main.libsonnet'), +} diff --git a/function-cel-filter/0.1/_gen/cel/v1beta1/filters.libsonnet b/function-cel-filter/0.1/_gen/cel/v1beta1/filters.libsonnet new file mode 100644 index 0000000..e6b2903 --- /dev/null +++ b/function-cel-filter/0.1/_gen/cel/v1beta1/filters.libsonnet @@ -0,0 +1,63 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='filters', url='', help='"Filters can be used to filter desired composed resources."'), + '#filters':: d.obj(help="\"Filters to apply to the desired composed resources produced by previous\\nfunctions in the pipeline. Each filter matches a desired composed\\nresource by name. If the expression evaluates to true, the composed\\nresource will be included. Desired composed resources that don't match\\nany filter are always included.\""), + filters: { + '#withExpression':: d.fn(help="\"Expression is a CEL expression. See https://github.com/google/cel-spec.\\nThe following top-level variables are available to the expression:\\n\\n\\n* observed\\n* desired\\n* context\\n\\n\\nExample expressions:\\n\\n\\n* observed.composite.resource.spec.widgets == 42\\n* observed.resources['composed'].connection_details['user'] == b'admin'\\n* desired.resources['composed'].resource.spec.widgets == 42\\n\\n\\nSee the RunFunctionRequest protobuf message for schema details.\\nhttps://buf.build/crossplane/crossplane/docs/main:apiextensions.fn.proto.v1beta1\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help="\"Name of the desired composed resource(s) this filter should match.\\n\\n\\nUse regular expressions to match multiple resources. Expressions are\\nautomatically prefixed with ^ and suffixed with $. For example 'buck.*'\\nbecomes '^buck.*$'. See https://github.com/google/re2/wiki/Syntax.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + }, + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Filters', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'cel.fn.crossplane.io/v1beta1', + kind: 'Filters', + } + self.metadata.withName(name=name), + '#withFilters':: d.fn(help="\"Filters to apply to the desired composed resources produced by previous\\nfunctions in the pipeline. Each filter matches a desired composed\\nresource by name. If the expression evaluates to true, the composed\\nresource will be included. 
Desired composed resources that don't match\\nany filter are always included.\"", args=[d.arg(name='filters', type=d.T.array)]), + withFilters(filters): { filters: if std.isArray(v=filters) then filters else [filters] }, + '#withFiltersMixin':: d.fn(help="\"Filters to apply to the desired composed resources produced by previous\\nfunctions in the pipeline. Each filter matches a desired composed\\nresource by name. If the expression evaluates to true, the composed\\nresource will be included. Desired composed resources that don't match\\nany filter are always included.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='filters', type=d.T.array)]), + withFiltersMixin(filters): { filters+: if std.isArray(v=filters) then filters else [filters] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/function-cel-filter/0.1/_gen/cel/v1beta1/main.libsonnet b/function-cel-filter/0.1/_gen/cel/v1beta1/main.libsonnet new file mode 100644 index 0000000..21e8ab0 --- /dev/null +++ b/function-cel-filter/0.1/_gen/cel/v1beta1/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta1', url='', help=''), + filters: (import 'filters.libsonnet'), +} diff --git a/function-cel-filter/0.1/gen.libsonnet b/function-cel-filter/0.1/gen.libsonnet new file mode 100644 index 0000000..2630adb --- /dev/null +++ b/function-cel-filter/0.1/gen.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='function_cel_filter', url='github.com/jsonnet-libs/crossplane-core-libsonnet/function-cel-filter/0.1/main.libsonnet', help=''), + cel:: (import '_gen/cel/main.libsonnet'), +} diff --git a/function-cel-filter/0.1/main.libsonnet b/function-cel-filter/0.1/main.libsonnet new file mode 100644 index 0000000..f5597a5 --- /dev/null +++ b/function-cel-filter/0.1/main.libsonnet @@ -0,0 +1 @@ +(import 'gen.libsonnet') diff --git a/function-patch-and-transform/0.7/_gen/pt/main.libsonnet b/function-patch-and-transform/0.7/_gen/pt/main.libsonnet new file mode 100644 index 0000000..6011b48 --- /dev/null +++ b/function-patch-and-transform/0.7/_gen/pt/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='pt', url='', help=''), + v1beta1: (import 'v1beta1/main.libsonnet'), +} diff --git a/function-patch-and-transform/0.7/_gen/pt/v1beta1/main.libsonnet b/function-patch-and-transform/0.7/_gen/pt/v1beta1/main.libsonnet new file mode 100644 index 0000000..3d6f195 --- /dev/null +++ b/function-patch-and-transform/0.7/_gen/pt/v1beta1/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta1', url='', help=''), + resources: (import 'resources.libsonnet'), +} diff --git a/function-patch-and-transform/0.7/_gen/pt/v1beta1/resources.libsonnet b/function-patch-and-transform/0.7/_gen/pt/v1beta1/resources.libsonnet new file mode 100644 index 0000000..b0e8b8d --- /dev/null +++ b/function-patch-and-transform/0.7/_gen/pt/v1beta1/resources.libsonnet @@ -0,0 +1,482 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resources', url='', help='"Resources specifies Patch & Transform resource templates."'), + '#environment':: d.obj(help='"Environment represents the Composition environment.\\n\\n\\nTHIS IS AN ALPHA FIELD.\\nDo not use it in production. 
It may be changed or removed without notice."'), + environment: { + '#patches':: d.obj(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed. These patches are between the XR\\nand the Environment. Either from the Environment to the XR, or vice\\nversa.\""), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite,\\nCombineToComposite patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. Use 'Required' to prevent the creation of a\\nnew composed resource until the required path exists.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath specifies how to patch to a field path. The default is\\n'Replace', which means the patch will completely replace the target field,\\nor create it if it does not exist. 
Use 'MergeObjects' to recursively merge the patch\\nobject with the target object, while keeping target object keys, but overwriting any array values, or use\\n'MergeObjectsAppendArrays' to recursively merge the patch object with the target object, while keeping\\ntarget object keys and appending any array values to target array values, or use\\n'ForceMergeObjects' to recursively merge the patch object with the target object, overwriting\\nany target object keys, including array values, or use\\n'ForceMergeObjectsAppendArrays' to recursively merge the patch object with the target object,\\noverwriting target object keys, and appending any array values to target array values.\\n'MergeObject' is deprecated, use 'MergeObjects' instead, which is functionally identical.\\n'AppendArray' is deprecated, use 'ForceMergeObjectsAppendArrays' instead, which is functionally identical.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { policy+: { toFieldPath: toFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. 
Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join the input strings."'), + join: { + '#withSeparator':: d.fn(help='"Separator to join the input strings."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. 
May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#replace':: d.obj(help='"Search/Replace applied to the input string."'), + replace: { + '#withReplace':: d.fn(help='"The Replace string replaces all occurrences of the search string."', args=[d.arg(name='replace', type=d.T.string)]), + withReplace(replace): { string+: { replace+: { replace: replace } } }, + '#withSearch':: d.fn(help='"The Search string to match."', args=[d.arg(name='search', type=d.T.string)]), + withSearch(search): { string+: { replace+: { search: search } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath or\\nToCompositeFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. 
Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. Each patch type may require\\nits own fields to be set on the Patch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withPatches':: d.fn(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed. These patches are between the XR\\nand the Environment. Either from the Environment to the XR, or vice\\nversa.\"", args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { environment+: { patches: if std.isArray(v=patches) then patches else [patches] } }, + '#withPatchesMixin':: d.fn(help="\"Patches is a list of environment patches that are executed before a\\ncomposition's resources are composed. These patches are between the XR\\nand the Environment. Either from the Environment to the XR, or vice\\nversa.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { environment+: { patches+: if std.isArray(v=patches) then patches else [patches] } }, + }, + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), + withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of Resources', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'pt.fn.crossplane.io/v1beta1', + kind: 'Resources', + } + self.metadata.withName(name=name), + '#patchSets':: d.obj(help='"PatchSets define a named set of patches that may be included by any\\nresource. 
PatchSets cannot themselves refer to other PatchSets."'), + patchSets: { + '#patches':: d.obj(help='"Patches will be applied as an overlay to the base resource."'), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite,\\nCombineToComposite patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. Use 'Required' to prevent the creation of a\\nnew composed resource until the required path exists.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath specifies how to patch to a field path. The default is\\n'Replace', which means the patch will completely replace the target field,\\nor create it if it does not exist. 
Use 'MergeObjects' to recursively merge the patch\\nobject with the target object, while keeping target object keys, but overwriting any array values, or use\\n'MergeObjectsAppendArrays' to recursively merge the patch object with the target object, while keeping\\ntarget object keys and appending any array values to target array values, or use\\n'ForceMergeObjects' to recursively merge the patch object with the target object, overwriting\\nany target object keys, including array values, or use\\n'ForceMergeObjectsAppendArrays' to recursively merge the patch object with the target object,\\noverwriting target object keys, and appending any array values to target array values.\\n'MergeObject' is deprecated, use 'MergeObjects' instead, which is functionally identical.\\n'AppendArray' is deprecated, use 'ForceMergeObjectsAppendArrays' instead, which is functionally identical.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { policy+: { toFieldPath: toFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. 
Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join the input strings."'), + join: { + '#withSeparator':: d.fn(help='"Separator to join the input strings."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. 
May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#replace':: d.obj(help='"Search/Replace applied to the input string."'), + replace: { + '#withReplace':: d.fn(help='"The Replace string replaces all occurrences of the search string."', args=[d.arg(name='replace', type=d.T.string)]), + withReplace(replace): { string+: { replace+: { replace: replace } } }, + '#withSearch':: d.fn(help='"The Search string to match."', args=[d.arg(name='search', type=d.T.string)]), + withSearch(search): { string+: { replace+: { search: search } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath or\\nToCompositeFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. 
Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. Each patch type may require\\nits own fields to be set on the ComposedPatch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withName':: d.fn(help='"Name of this PatchSet."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPatches':: d.fn(help='"Patches will be applied as an overlay to the base resource."', args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { patches: if std.isArray(v=patches) then patches else [patches] }, + '#withPatchesMixin':: d.fn(help='"Patches will be applied as an overlay to the base resource."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { patches+: if std.isArray(v=patches) then patches else [patches] }, + }, + '#resources':: d.obj(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource is created."'), + resources: { + '#connectionDetails':: d.obj(help='"ConnectionDetails lists the propagation secret keys from this composed\\nresource to the composition instance connection secret."'), + connectionDetails: { + '#withFromConnectionSecretKey':: d.fn(help="\"FromConnectionSecretKey is the key that will be used to fetch the value\\nfrom the composed resource's connection secret.\"", args=[d.arg(name='fromConnectionSecretKey', type=d.T.string)]), + withFromConnectionSecretKey(fromConnectionSecretKey): { fromConnectionSecretKey: fromConnectionSecretKey }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the composed resource whose\\nvalue to be used as input. Name must be specified if the type is\\nFromFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withName':: d.fn(help='"Name of the connection secret key that will be propagated to the\\nconnection secret of the composed resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withType':: d.fn(help='"Type sets the connection detail fetching behavior to be used. Each\\nconnection detail type may require its own fields to be set on the\\nConnectionDetail object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withValue':: d.fn(help='"Value that will be propagated to the connection secret of the composite\\nresource. 
May be set to inject a fixed, non-sensitive connection secret\\nvalue, for example a well-known port."', args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + }, + '#patches':: d.obj(help='"Patches to and from the composed resource."'), + patches: { + '#combine':: d.obj(help='"Combine is the patch configuration for a CombineFromComposite,\\nCombineToComposite patch."'), + combine: { + '#string':: d.obj(help='"String declares that input variables should be combined into a single\\nstring, using the relevant settings for formatting purposes."'), + string: { + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { combine+: { string+: { fmt: fmt } } }, + }, + '#variables':: d.obj(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."'), + variables: { + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the source whose value is\\nto be used as input."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + }, + '#withStrategy':: d.fn(help='"Strategy defines the strategy to use to combine the input variable values.\\nCurrently only string is supported."', args=[d.arg(name='strategy', type=d.T.string)]), + withStrategy(strategy): { combine+: { strategy: strategy } }, + '#withVariables':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { combine+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables are the list of variables whose values will be retrieved and\\ncombined."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { combine+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#policy':: d.obj(help='"Policy configures the specifics of patching behaviour."'), + policy: { + '#withFromFieldPath':: d.fn(help="\"FromFieldPath specifies how to patch from a field path. The default is\\n'Optional', which means the patch will be a no-op if the specified\\nfromFieldPath does not exist. Use 'Required' to prevent the creation of a\\nnew composed resource until the required path exists.\"", args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { policy+: { fromFieldPath: fromFieldPath } }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath specifies how to patch to a field path. The default is\\n'Replace', which means the patch will completely replace the target field,\\nor create it if it does not exist. 
Use 'MergeObjects' to recursively merge the patch\\nobject with the target object, while keeping target object keys, but overwriting any array values, or use\\n'MergeObjectsAppendArrays' to recursively merge the patch object with the target object, while keeping\\ntarget object keys and appending any array values to target array values, or use\\n'ForceMergeObjects' to recursively merge the patch object with the target object, overwriting\\nany target object keys, including array values, or use\\n'ForceMergeObjectsAppendArrays' to recursively merge the patch object with the target object,\\noverwriting target object keys, and appending any array values to target array values.\\n'MergeObject' is deprecated, use 'MergeObjects' instead, which is functionally identical.\\n'AppendArray' is deprecated, use 'ForceMergeObjectsAppendArrays' instead, which is functionally identical.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { policy+: { toFieldPath: toFieldPath } }, + }, + '#transforms':: d.obj(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."'), + transforms: { + '#convert':: d.obj(help='"Convert is used to cast the input into the given output type."'), + convert: { + '#withFormat':: d.fn(help='"The expected input format.\\n\\n\\n* `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).\\nOnly used during `string -> float64` conversions.\\n* `json` - parses the input as a JSON string.\\nOnly used during `string -> object` or `string -> list` conversions.\\n\\n\\nIf this property is null, the default conversion is applied."', args=[d.arg(name='format', type=d.T.string)]), + withFormat(format): { convert+: { format: format } }, + '#withToType':: d.fn(help='"ToType is the type of the output of this transform."', args=[d.arg(name='toType', type=d.T.string)]), + withToType(toType): { convert+: { toType: toType } }, + }, + '#match':: d.obj(help='"Match is a more complex version of Map that matches a list of patterns."'), + match: { + '#patterns':: d.obj(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."'), + patterns: { + '#withLiteral':: d.fn(help='"Literal exactly matches the input string (case sensitive).\\nIs required if `type` is `literal`."', args=[d.arg(name='literal', type=d.T.string)]), + withLiteral(literal): { literal: literal }, + '#withRegexp':: d.fn(help='"Regexp to match against the input string.\\nIs required if `type` is `regexp`."', args=[d.arg(name='regexp', type=d.T.string)]), + withRegexp(regexp): { regexp: regexp }, + '#withResult':: d.fn(help='"The value that is used as result of the transform if the pattern matches."', args=[d.arg(name='result', type=d.T.any)]), + withResult(result): { result: result }, + '#withType':: d.fn(help='"Type specifies how the pattern matches the input.\\n\\n\\n* `literal` - the pattern value has to exactly match (case sensitive) the\\ninput string. This is the default.\\n\\n\\n* `regexp` - the pattern treated as a regular expression against\\nwhich the input string is tested. 
Crossplane will throw an error if the\\nkey is not a valid regexp."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFallbackTo':: d.fn(help='"Determines to what value the transform should fallback if no pattern matches."', args=[d.arg(name='fallbackTo', type=d.T.string)]), + withFallbackTo(fallbackTo): { match+: { fallbackTo: fallbackTo } }, + '#withFallbackValue':: d.fn(help='"The fallback value that should be returned by the transform if now pattern\\nmatches."', args=[d.arg(name='fallbackValue', type=d.T.any)]), + withFallbackValue(fallbackValue): { match+: { fallbackValue: fallbackValue } }, + '#withPatterns':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."', args=[d.arg(name='patterns', type=d.T.array)]), + withPatterns(patterns): { match+: { patterns: if std.isArray(v=patterns) then patterns else [patterns] } }, + '#withPatternsMixin':: d.fn(help='"The patterns that should be tested against the input string.\\nPatterns are tested in order. The value of the first match is used as\\nresult of this transform."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patterns', type=d.T.array)]), + withPatternsMixin(patterns): { match+: { patterns+: if std.isArray(v=patterns) then patterns else [patterns] } }, + }, + '#math':: d.obj(help='"Math is used to transform the input via mathematical operations such as\\nmultiplication."'), + math: { + '#withClampMax':: d.fn(help='"ClampMax makes sure that the value is not bigger than the given value."', args=[d.arg(name='clampMax', type=d.T.integer)]), + withClampMax(clampMax): { math+: { clampMax: clampMax } }, + '#withClampMin':: d.fn(help='"ClampMin makes sure that the value is not smaller than the given value."', args=[d.arg(name='clampMin', type=d.T.integer)]), + withClampMin(clampMin): { math+: { clampMin: clampMin } }, + '#withMultiply':: d.fn(help='"Multiply the value."', args=[d.arg(name='multiply', type=d.T.integer)]), + withMultiply(multiply): { math+: { multiply: multiply } }, + '#withType':: d.fn(help='"Type of the math transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { math+: { type: type } }, + }, + '#string':: d.obj(help='"String is used to transform the input into a string or a different kind\\nof string. Note that the input does not necessarily need to be a string."'), + string: { + '#join':: d.obj(help='"Join the input strings."'), + join: { + '#withSeparator':: d.fn(help='"Separator to join the input strings."', args=[d.arg(name='separator', type=d.T.string)]), + withSeparator(separator): { string+: { join+: { separator: separator } } }, + }, + '#regexp':: d.obj(help='"Extract a match from the input using a regular expression."'), + regexp: { + '#withGroup':: d.fn(help='"Group number to match. 0 (the default) matches the entire expression."', args=[d.arg(name='group', type=d.T.integer)]), + withGroup(group): { string+: { regexp+: { group: group } } }, + '#withMatch':: d.fn(help='"Match string. 
May optionally include submatches, aka capture groups.\\nSee https://pkg.go.dev/regexp/ for details."', args=[d.arg(name='match', type=d.T.string)]), + withMatch(match): { string+: { regexp+: { match: match } } }, + }, + '#replace':: d.obj(help='"Search/Replace applied to the input string."'), + replace: { + '#withReplace':: d.fn(help='"The Replace string replaces all occurrences of the search string."', args=[d.arg(name='replace', type=d.T.string)]), + withReplace(replace): { string+: { replace+: { replace: replace } } }, + '#withSearch':: d.fn(help='"The Search string to match."', args=[d.arg(name='search', type=d.T.string)]), + withSearch(search): { string+: { replace+: { search: search } } }, + }, + '#withConvert':: d.fn(help='"Optional conversion method to be specified.\\n`ToUpper` and `ToLower` change the letter case of the input string.\\n`ToBase64` and `FromBase64` perform a base64 conversion based on the input string.\\n`ToJson` converts any input value into its raw JSON representation.\\n`ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input\\nconverted to JSON."', args=[d.arg(name='convert', type=d.T.string)]), + withConvert(convert): { string+: { convert: convert } }, + '#withFmt':: d.fn(help='"Format the input using a Go format string. See\\nhttps://golang.org/pkg/fmt/ for details."', args=[d.arg(name='fmt', type=d.T.string)]), + withFmt(fmt): { string+: { fmt: fmt } }, + '#withTrim':: d.fn(help='"Trim the prefix or suffix from the input"', args=[d.arg(name='trim', type=d.T.string)]), + withTrim(trim): { string+: { trim: trim } }, + '#withType':: d.fn(help='"Type of the string transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { string+: { type: type } }, + }, + '#withMap':: d.fn(help='"Map uses the input as a key in the given map and returns the value."', args=[d.arg(name='map', type=d.T.object)]), + withMap(map): { map: map }, + '#withMapMixin':: d.fn(help='"Map uses the input as a key in the given map and returns the value."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='map', type=d.T.object)]), + withMapMixin(map): { map+: map }, + '#withType':: d.fn(help='"Type of the transform to be run."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withFromFieldPath':: d.fn(help='"FromFieldPath is the path of the field on the resource whose value is\\nto be used as input. Required when type is FromCompositeFieldPath or\\nToCompositeFieldPath."', args=[d.arg(name='fromFieldPath', type=d.T.string)]), + withFromFieldPath(fromFieldPath): { fromFieldPath: fromFieldPath }, + '#withPatchSetName':: d.fn(help='"PatchSetName to include patches from. Required when type is PatchSet."', args=[d.arg(name='patchSetName', type=d.T.string)]), + withPatchSetName(patchSetName): { patchSetName: patchSetName }, + '#withToFieldPath':: d.fn(help="\"ToFieldPath is the path of the field on the resource whose value will\\nbe changed with the result of transforms. 
Leave empty if you'd like to\\npropagate to the same path as fromFieldPath.\"", args=[d.arg(name='toFieldPath', type=d.T.string)]), + withToFieldPath(toFieldPath): { toFieldPath: toFieldPath }, + '#withTransforms':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."', args=[d.arg(name='transforms', type=d.T.array)]), + withTransforms(transforms): { transforms: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withTransformsMixin':: d.fn(help='"Transforms are the list of functions that are used as a FIFO pipe for the\\ninput to be transformed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='transforms', type=d.T.array)]), + withTransformsMixin(transforms): { transforms+: if std.isArray(v=transforms) then transforms else [transforms] }, + '#withType':: d.fn(help='"Type sets the patching behaviour to be used. Each patch type may require\\nits own fields to be set on the ComposedPatch object."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#readinessChecks':: d.obj(help='"ReadinessChecks allows users to define custom readiness checks. All\\nchecks have to return true in order for resource to be considered ready.\\nThe default readiness check is to have the \\"Ready\\" condition to be\\n\\"True\\"."'), + readinessChecks: { + '#matchCondition':: d.obj(help="\"MatchCondition specifies the condition you'd like to match if you're using \\\"MatchCondition\\\" type.\""), + matchCondition: { + '#withType':: d.fn(help="\"Type indicates the type of condition you'd like to use.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { matchCondition+: { type: type } }, + }, + '#withFieldPath':: d.fn(help='"FieldPath shows the path of the field whose value will be used."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldPath: fieldPath }, + '#withMatchInteger':: d.fn(help="\"MatchInt is the value you'd like to match if you're using \\\"MatchInt\\\" type.\"", args=[d.arg(name='matchInteger', type=d.T.integer)]), + withMatchInteger(matchInteger): { matchInteger: matchInteger }, + '#withMatchString':: d.fn(help="\"MatchString is the value you'd like to match if you're using \\\"MatchString\\\" type.\"", args=[d.arg(name='matchString', type=d.T.string)]), + withMatchString(matchString): { matchString: matchString }, + '#withType':: d.fn(help="\"Type indicates the type of probe you'd like to use.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + }, + '#withBase':: d.fn(help='"Base of the composed resource that patches will be applied to and from.\\nIf base is omitted, a previous Function within the pipeline must have\\nproduced the named composed resource. Patches will be applied to and from\\nthat resource. If base is specified, and a previous Function within the\\npipeline produced the name composed resource, it will be overwritten."', args=[d.arg(name='base', type=d.T.object)]), + withBase(base): { base: base }, + '#withBaseMixin':: d.fn(help='"Base of the composed resource that patches will be applied to and from.\\nIf base is omitted, a previous Function within the pipeline must have\\nproduced the named composed resource. Patches will be applied to and from\\nthat resource. 
If base is specified, and a previous Function within the\\npipeline produced the name composed resource, it will be overwritten."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='base', type=d.T.object)]), + withBaseMixin(base): { base+: base }, + '#withConnectionDetails':: d.fn(help='"ConnectionDetails lists the propagation secret keys from this composed\\nresource to the composition instance connection secret."', args=[d.arg(name='connectionDetails', type=d.T.array)]), + withConnectionDetails(connectionDetails): { connectionDetails: if std.isArray(v=connectionDetails) then connectionDetails else [connectionDetails] }, + '#withConnectionDetailsMixin':: d.fn(help='"ConnectionDetails lists the propagation secret keys from this composed\\nresource to the composition instance connection secret."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='connectionDetails', type=d.T.array)]), + withConnectionDetailsMixin(connectionDetails): { connectionDetails+: if std.isArray(v=connectionDetails) then connectionDetails else [connectionDetails] }, + '#withName':: d.fn(help='"A Name uniquely identifies this entry within its resources array."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPatches':: d.fn(help='"Patches to and from the composed resource."', args=[d.arg(name='patches', type=d.T.array)]), + withPatches(patches): { patches: if std.isArray(v=patches) then patches else [patches] }, + '#withPatchesMixin':: d.fn(help='"Patches to and from the composed resource."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patches', type=d.T.array)]), + withPatchesMixin(patches): { patches+: if std.isArray(v=patches) then patches else [patches] }, + '#withReadinessChecks':: d.fn(help='"ReadinessChecks allows users to define custom readiness checks. All\\nchecks have to return true in order for resource to be considered ready.\\nThe default readiness check is to have the \\"Ready\\" condition to be\\n\\"True\\"."', args=[d.arg(name='readinessChecks', type=d.T.array)]), + withReadinessChecks(readinessChecks): { readinessChecks: if std.isArray(v=readinessChecks) then readinessChecks else [readinessChecks] }, + '#withReadinessChecksMixin':: d.fn(help='"ReadinessChecks allows users to define custom readiness checks. All\\nchecks have to return true in order for resource to be considered ready.\\nThe default readiness check is to have the \\"Ready\\" condition to be\\n\\"True\\"."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessChecks', type=d.T.array)]), + withReadinessChecksMixin(readinessChecks): { readinessChecks+: if std.isArray(v=readinessChecks) then readinessChecks else [readinessChecks] }, + }, + '#withPatchSets':: d.fn(help='"PatchSets define a named set of patches that may be included by any\\nresource. PatchSets cannot themselves refer to other PatchSets."', args=[d.arg(name='patchSets', type=d.T.array)]), + withPatchSets(patchSets): { patchSets: if std.isArray(v=patchSets) then patchSets else [patchSets] }, + '#withPatchSetsMixin':: d.fn(help='"PatchSets define a named set of patches that may be included by any\\nresource. 
PatchSets cannot themselves refer to other PatchSets."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='patchSets', type=d.T.array)]), + withPatchSetsMixin(patchSets): { patchSets+: if std.isArray(v=patchSets) then patchSets else [patchSets] }, + '#withResources':: d.fn(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource is created."', args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, + '#withResourcesMixin':: d.fn(help='"Resources is a list of resource templates that will be used when a\\ncomposite resource is created."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/function-patch-and-transform/0.7/gen.libsonnet b/function-patch-and-transform/0.7/gen.libsonnet new file mode 100644 index 0000000..56df6ab --- /dev/null +++ b/function-patch-and-transform/0.7/gen.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='function_patch_and_transform', url='github.com/jsonnet-libs/crossplane-core-libsonnet/function-patch-and-transform/0.7/main.libsonnet', help=''), + pt:: (import '_gen/pt/main.libsonnet'), +} diff --git a/function-patch-and-transform/0.7/main.libsonnet b/function-patch-and-transform/0.7/main.libsonnet new file mode 100644 index 0000000..f5597a5 --- /dev/null +++ b/function-patch-and-transform/0.7/main.libsonnet @@ -0,0 +1 @@ +(import 'gen.libsonnet') diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000..1d5b5cc --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,38 @@ +"edit_uri": "" +"extra_css": + - "stylesheets/extra.css" +"markdown_extensions": + - "pymdownx.highlight" + - "pymdownx.superfences" + - "mdx_truly_sane_lists": + "nested_indent": 2 + "truly_sane": true +"plugins": + - "search": + "separator": "[\\s\\-\\.]+" + - "minify": + "minify_html": true +"repo_name": "jsonnet-libs/crossplane-core-libsonnet" +"repo_url": "https://github.com/jsonnet-libs/crossplane-core-libsonnet" +"site_name": "crossplane-core jsonnet library" +"site_url": "https://jsonnet-libs.github.io/crossplane-core-libsonnet" +"theme": + "features": + - "navigation.tabs" + - "navigation.indexes" + "name": "material" + "palette": + - "accent": "indigo" + "media": "(prefers-color-scheme: light)" + "primary": "indigo" + "scheme": "default" + "toggle": + "icon": "material/toggle-switch-off-outline" + "name": "Switch to dark mode" + - "accent": "red" + "media": "(prefers-color-scheme: dark)" + "primary": "red" + "scheme": "slate" + "toggle": + "icon": "material/toggle-switch" + "name": "Switch to light mode" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..c790bf2 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,14 @@ +# mkdocs +mkdocs>=1.3.0 + +# To limit search to a subset +mkdocs-exclude-search>=0.5 + +# To minify the HTML +mkdocs-minify-plugin>=0.3 + +# Include the theme +mkdocs-material>=7.1.6 + +# Deal with list indent of 2 spaces +mdx-truly-sane-lists>=1.3
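Usage sketch (editor's addition, not part of the diff above): a minimal, hypothetical example of composing the generated builders into a function-patch-and-transform resource template with one patch and a string transform. The import path follows gen.libsonnet; the `pt.v1beta1.resources` nesting is an assumption about the generated `_gen/pt` tree (only partly visible here), and the `Bucket` base plus the `spec.location` / `spec.forProvider.region` field paths are placeholders, not taken from this repository.

// Hypothetical usage sketch -- nesting and field paths are assumptions.
local pt = (import 'github.com/jsonnet-libs/crossplane-core-libsonnet/function-patch-and-transform/0.7/main.libsonnet').pt;
local r = pt.v1beta1.resources;  // assumed nesting of the generated package

local bucket =
  r.resources.withName('bucket')
  // Placeholder base object; a real template would carry the full manifest.
  + r.resources.withBase({ apiVersion: 'example.org/v1alpha1', kind: 'Bucket' })
  + r.resources.withPatches([
      // Copy spec.location from the composite resource into the composed
      // resource, formatting the value on the way via a string transform.
      r.resources.patches.withType('FromCompositeFieldPath')
      + r.resources.patches.withFromFieldPath('spec.location')
      + r.resources.patches.withToFieldPath('spec.forProvider.region')
      + r.resources.patches.withTransforms([
          r.resources.patches.transforms.withType('string')
          + r.resources.patches.transforms.string.withType('Format')
          + r.resources.patches.transforms.string.withFmt('%s-1'),
        ]),
    ]);

// withResources wraps the template list into the Resources input object;
// apiVersion/kind of the function input itself are not set by this sketch.
r.withResources([bucket])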
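A second hypothetical sketch, using only builders that appear in the generated library above, for connection-detail propagation and a custom readiness check. The composed-resource name, secret key, and status field path are illustrative placeholders, and the `pt.v1beta1.resources` nesting is the same assumption as in the previous sketch.

// Hypothetical sketch: connection details and readiness checks.
local pt = (import 'github.com/jsonnet-libs/crossplane-core-libsonnet/function-patch-and-transform/0.7/main.libsonnet').pt;
local r = pt.v1beta1.resources;  // assumed nesting, as above

r.resources.withName('database')
+ r.resources.withConnectionDetails([
    // Propagate the composed resource's "password" secret key to the
    // composite's connection secret under the same key name.
    r.resources.connectionDetails.withType('FromConnectionSecretKey')
    + r.resources.connectionDetails.withName('password')
    + r.resources.connectionDetails.withFromConnectionSecretKey('password'),
  ])
+ r.resources.withReadinessChecks([
    // Consider the resource ready once the (placeholder) status field
    // reports "Available" instead of relying on the default Ready condition.
    r.resources.readinessChecks.withType('MatchString')
    + r.resources.readinessChecks.withFieldPath('status.atProvider.state')
    + r.resources.readinessChecks.withMatchString('Available'),
  ])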