diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 90da768350..04695353b6 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -388,6 +388,14 @@ functions: TASKFILE_TARGET: test-short args: [*task-runner, run-docker] + run-goleak-test: + - command: subprocess.exec + type: test + params: + binary: "bash" + include_expansions_in_env: ["MONGODB_URI"] + args: [*task-runner, test-goleak] + run-ocsp-server: - command: subprocess.exec params: @@ -1100,6 +1108,26 @@ tasks: - func: bootstrap-mongo-orchestration - func: run-docker-test + - name: test-goroutine-leaks-replicaset + tags: ["goleak"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "replica_set" + AUTH: "noauth" + SSL: "nossl" + - func: run-goleak-test + + - name: test-goroutine-leaks-sharded + tags: ["goleak"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "noauth" + SSL: "nossl" + - func: run-goleak-test + - name: test-load-balancer-noauth-nossl tags: ["load-balancer"] commands: @@ -2056,6 +2084,16 @@ buildvariants: tasks: - name: "test-docker-runner" + - name: goroutine-leaks-test + tags: ["pullrequest"] + display_name: "Goroutine Leaks Test" + run_on: + - ubuntu2204-large + expansions: + GO_DIST: "/opt/golang/go1.22" + tasks: + - name: ".goleak" + - matrix_name: "tests-rhel-36-with-zlib-support" tags: ["pullrequest"] matrix_spec: { version: ["3.6"], os-ssl-32: ["rhel87-64"] } diff --git a/Dockerfile b/Dockerfile index 06b88f5abd..46b6cee71b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # Dockerfile for Go Driver local development. # Build libmongocrypt in a separate build stage. -FROM ubuntu:20.04 as libmongocrypt +FROM artifactory.corp.mongodb.com/dockerhub/ubuntu:20.04 as libmongocrypt RUN apt-get -qq update && \ apt-get -qqy install --no-install-recommends \ @@ -17,7 +17,7 @@ RUN cd /root && bash ./install-libmongocrypt.sh # Copy in the files from the libmongocrypt build stage. -FROM ubuntu:20.04 +FROM artifactory.corp.mongodb.com/dockerhub/ubuntu:20.04 # Install common deps. RUN export DEBIAN_FRONTEND=noninteractive && \ diff --git a/Taskfile.yml b/Taskfile.yml index f22427a640..f65ab0df52 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -105,6 +105,8 @@ tasks: test-gcpkms: bash etc/run-gcpkms-test.sh + test-goleak: bash etc/run-goleak-test.sh + ### Local FaaS tasks. ### build-faas-awslambda: requires: diff --git a/bson/decoder.go b/bson/decoder.go index c6a7626719..2fa9e6f1d6 100644 --- a/bson/decoder.go +++ b/bson/decoder.go @@ -87,7 +87,7 @@ func (d *Decoder) SetRegistry(r *Registry) { d.dc.Registry = r } -// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This +// DefaultDocumentM causes the Decoder to always unmarshal documents into the bson.M type. This // behavior is restricted to data typed as "interface{}" or "map[string]interface{}". func (d *Decoder) DefaultDocumentM() { d.dc.defaultDocumentType = reflect.TypeOf(M{}) @@ -101,7 +101,7 @@ func (d *Decoder) AllowTruncatingDoubles() { } // BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or -// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. +// "Old" BSON binary subtype as a Go byte slice instead of a bson.Binary. 
func (d *Decoder) BinaryAsSlice() { d.dc.binaryAsSlice = true } diff --git a/bson/decoder_test.go b/bson/decoder_test.go index 4cebea9d0e..e88aca6d6b 100644 --- a/bson/decoder_test.go +++ b/bson/decoder_test.go @@ -647,7 +647,8 @@ func TestDecoderConfiguration(t *testing.T) { var got objectIDTest err := dec.Decode(&got) - assert.EqualError(t, err, "error decoding key id: decoding an object ID to a non-hexadecimal string representation is not supported") + const want = "error decoding key id: decoding an object ID into a string is not supported by default (set Decoder.ObjectIDAsHexString to enable decoding as a hexadecimal string)" + assert.EqualError(t, err, want) }) t.Run("DefaultDocumentM top-level", func(t *testing.T) { t.Parallel() diff --git a/bson/doc.go b/bson/doc.go index 9990da008e..81aceef278 100644 --- a/bson/doc.go +++ b/bson/doc.go @@ -47,20 +47,20 @@ // 5. BSON boolean unmarshals to a bool. // 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). // 7. BSON array unmarshals to a bson.A. -// 8. BSON ObjectId unmarshals to a primitive.ObjectID. -// 9. BSON datetime unmarshals to a primitive.DateTime. -// 10. BSON binary unmarshals to a primitive.Binary. -// 11. BSON regular expression unmarshals to a primitive.Regex. -// 12. BSON JavaScript unmarshals to a primitive.JavaScript. -// 13. BSON code with scope unmarshals to a primitive.CodeWithScope. -// 14. BSON timestamp unmarshals to an primitive.Timestamp. -// 15. BSON 128-bit decimal unmarshals to an primitive.Decimal128. -// 16. BSON min key unmarshals to an primitive.MinKey. -// 17. BSON max key unmarshals to an primitive.MaxKey. -// 18. BSON undefined unmarshals to a primitive.Undefined. +// 8. BSON ObjectId unmarshals to a bson.ObjectID. +// 9. BSON datetime unmarshals to a bson.DateTime. +// 10. BSON binary unmarshals to a bson.Binary. +// 11. BSON regular expression unmarshals to a bson.Regex. +// 12. BSON JavaScript unmarshals to a bson.JavaScript. +// 13. BSON code with scope unmarshals to a bson.CodeWithScope. +// 14. BSON timestamp unmarshals to a bson.Timestamp. +// 15. BSON 128-bit decimal unmarshals to a bson.Decimal128. +// 16. BSON min key unmarshals to a bson.MinKey. +// 17. BSON max key unmarshals to a bson.MaxKey. +// 18. BSON undefined unmarshals to a bson.Undefined. // 19. BSON null unmarshals to nil. -// 20. BSON DBPointer unmarshals to a primitive.DBPointer. -// 21. BSON symbol unmarshals to a primitive.Symbol. +// 20. BSON DBPointer unmarshals to a bson.DBPointer. +// 21. BSON symbol unmarshals to a bson.Symbol. // // The above mappings also apply when marshaling a D or M to BSON. Some other useful marshaling mappings are: // diff --git a/bson/primitive.go b/bson/primitive.go index 80cf085b02..162bfa7133 100644 --- a/bson/primitive.go +++ b/bson/primitive.go @@ -60,7 +60,7 @@ func (d DateTime) MarshalJSON() ([]byte, error) { return d.Time().UTC().MarshalJSON() } -// UnmarshalJSON creates a primitive.DateTime from a JSON string. +// UnmarshalJSON creates a bson.DateTime from a JSON string. func (d *DateTime) UnmarshalJSON(data []byte) error { // Ignore "null" so that we can distinguish between a "null" value and // valid value that is the zero time (as reported by time.Time.IsZero). 
diff --git a/bson/string_codec.go b/bson/string_codec.go index a780677013..456028c1de 100644 --- a/bson/string_codec.go +++ b/bson/string_codec.go @@ -7,6 +7,7 @@ package bson import ( + "errors" "fmt" "reflect" ) @@ -50,14 +51,16 @@ func (sc *stringCodec) decodeType(dc DecodeContext, vr ValueReader, t reflect.Ty return emptyValue, err } case TypeObjectID: - oid, err := vr.ReadObjectID() - if err != nil { - return emptyValue, err - } if dc.objectIDAsHexString { + oid, err := vr.ReadObjectID() + if err != nil { + return emptyValue, err + } str = oid.Hex() } else { - return emptyValue, fmt.Errorf("decoding an object ID to a non-hexadecimal string representation is not supported") + const msg = "decoding an object ID into a string is not supported by default " + + "(set Decoder.ObjectIDAsHexString to enable decoding as a hexadecimal string)" + return emptyValue, errors.New(msg) } case TypeSymbol: str, err = vr.ReadSymbol() diff --git a/docs/migration-2.0.md b/docs/migration-2.0.md index 3b0ad3b7fa..aa11b57f92 100644 --- a/docs/migration-2.0.md +++ b/docs/migration-2.0.md @@ -867,6 +867,34 @@ The `NewRegistryBuilder` function has been removed along with the `bsoncodec.Reg ### Decoder +The BSON decoding logic has changed to decode documents into a `bson.D` by default. + +The following examples show the behavior change. + +```go +// v1 + +b1 := bson.M{"a": 1, "b": bson.M{"c": 2}} +b2, _ := bson.Marshal(b1) +b3 := bson.M{} +bson.Unmarshal(b2, &b3) +fmt.Printf("b3.b type: %T\n", b3["b"]) +// Output: b3.b type: primitive.M +``` + +```go +// v2 + +b1 := bson.M{"a": 1, "b": bson.M{"c": 2}} +b2, _ := bson.Marshal(b1) +b3 := bson.M{} +bson.Unmarshal(b2, &b3) +fmt.Printf("b3.b type: %T\n", b3["b"]) +// Output: b3.b type: bson.D +``` + +Use `Decoder.DefaultDocumentM()` or set the `DefaultDocumentM` field of `options.BSONOptions` to always decode documents into the `bson.M` type. + #### NewDecoder The signature of `NewDecoder` has been updated without an error being returned. @@ -1043,11 +1071,11 @@ The signature of `Reset` has been updated without an error being returned. #### DefaultDocumentD / DefaultDocumentM -`Decoder.DefaultDocumentD` has been removed since a document, including a top-level value (e.g. you pass in an empty interface value to Decode), is always decoded into a `bson.D` by default. Therefore, use `Decoder.DefaultDocumentM` to always decode a document into a `bson.M` to avoid unexpected decode results. +`Decoder.DefaultDocumentD()` has been removed because a document, including a top-level value (e.g. when you pass an empty interface value to Decode), is always decoded into a `bson.D` by default. Therefore, use `Decoder.DefaultDocumentM()` to always decode a document into a `bson.M` to avoid unexpected decode results. #### ObjectIDAsHexString -`Decoder.ObjectIDAsHexString` method enables decoding a BSON ObjectId as a hexadecimal string. Otherwise, the decoder returns an error by default instead of decoding as the UTF-8 representation of the raw ObjectId bytes, which results in a garbled and unusable string. +The `Decoder.ObjectIDAsHexString()` method enables decoding a BSON ObjectId as a hexadecimal string. Otherwise, the decoder returns an error by default instead of decoding as the UTF-8 representation of the raw ObjectId bytes, which results in a garbled and unusable string. 
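+For example, here is a minimal sketch of opting in to hexadecimal decoding. It assumes the v2 `bson.NewDocumentReader` and `bson.NewObjectID` helpers and the standard-library `bytes` package:
+
+```go
+data, _ := bson.Marshal(bson.D{{"_id", bson.NewObjectID()}})
+
+dec := bson.NewDecoder(bson.NewDocumentReader(bytes.NewReader(data)))
+dec.ObjectIDAsHexString()
+
+// With ObjectIDAsHexString set, the ObjectId decodes into the string field as
+// its 24-character hexadecimal form instead of returning an error.
+var res struct {
+	ID string `bson:"_id"`
+}
+_ = dec.Decode(&res)
+```
+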
### Encoder @@ -1183,8 +1211,20 @@ A new `RawArray` type has been added to the `bson` package as a primitive type t ### ValueMarshaler -The `MarshalBSONValue` method of the `ValueMarshaler` interface is only required to return a byte type value representing the BSON type to avoid importing the `bsontype` package. +The `MarshalBSONValue` method of the [ValueMarshaler](https://pkg.go.dev/go.mongodb.org/mongo-driver/v2/bson#ValueMarshaler) interface now returns a `byte` value representing the [BSON type](https://pkg.go.dev/go.mongodb.org/mongo-driver/v2/bson#Type). That allows external packages to implement the `ValueMarshaler` interface without having to import the `bson` package. Convert a returned `byte` value to [bson.Type](https://pkg.go.dev/go.mongodb.org/mongo-driver/v2/bson#Type) to compare with the BSON type constants. For example: + +```go +btype, _, _ := m.MarshalBSONValue() +fmt.Printf("type of data: %s\n", bson.Type(btype)) +fmt.Printf("type of data is an array: %v\n", bson.Type(btype) == bson.TypeArray) +``` ### ValueUnmarshaler -The `UnmarshalBSONValue` method of the `ValueUnmarshaler` interface is only required to take a byte type argument representing the BSON type to avoid importing the Go driver package. +The `UnmarshalBSONValue` method of the [ValueUnmarshaler](https://pkg.go.dev/go.mongodb.org/mongo-driver/v2/bson#ValueUnmarshaler) interface now accepts a `byte` value representing the [BSON type](https://pkg.go.dev/go.mongodb.org/mongo-driver/v2/bson#Type) as the first argument. That allows packages to implement `ValueUnmarshaler` without having to import the `bson` package. For example: + +```go +if err := m.UnmarshalBSONValue(byte(bson.TypeEmbeddedDocument), bytes); err != nil { + log.Fatalf("failed to decode embedded document: %v", err) +} +``` diff --git a/etc/run-goleak-test.sh b/etc/run-goleak-test.sh new file mode 100755 index 0000000000..143df5e7f4 --- /dev/null +++ b/etc/run-goleak-test.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# run-goleak-test +# Run goroutine leak tests. +set -eu +set +x + +echo "Running internal/test/goleak" +pushd internal/test/goleak +go test -v ./... 
>> ../../../test.suite +popd diff --git a/examples/_logger/logrus/go.mod b/examples/_logger/logrus/go.mod index f6ec322f68..8224e28c26 100644 --- a/examples/_logger/logrus/go.mod +++ b/examples/_logger/logrus/go.mod @@ -18,8 +18,8 @@ require ( github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect ) diff --git a/examples/_logger/logrus/go.sum b/examples/_logger/logrus/go.sum index 8f12f843fb..c01f693612 100644 --- a/examples/_logger/logrus/go.sum +++ b/examples/_logger/logrus/go.sum @@ -30,32 +30,32 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/examples/_logger/zap/go.mod b/examples/_logger/zap/go.mod index 4d47994a73..dd6559be51 100644 --- a/examples/_logger/zap/go.mod +++ b/examples/_logger/zap/go.mod @@ -20,7 +20,7 @@ require ( github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/text v0.20.0 // indirect ) diff --git a/examples/_logger/zap/go.sum b/examples/_logger/zap/go.sum index b86ad07cf7..9281edf34c 100644 --- a/examples/_logger/zap/go.sum +++ b/examples/_logger/zap/go.sum @@ -48,8 +48,8 @@ go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -58,8 +58,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -71,8 +71,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/examples/_logger/zerolog/go.mod b/examples/_logger/zerolog/go.mod index 9913c321c4..bde9b5cc6f 100644 --- a/examples/_logger/zerolog/go.mod +++ b/examples/_logger/zerolog/go.mod @@ -20,8 +20,8 @@ require ( github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect ) diff --git a/examples/_logger/zerolog/go.sum b/examples/_logger/zerolog/go.sum index 8cc6562614..3dfb1f2727 100644 --- a/examples/_logger/zerolog/go.sum +++ b/examples/_logger/zerolog/go.sum @@ -31,16 +31,16 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -48,16 +48,16 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/go.mod b/go.mod index a51898d0fd..070ed697bc 100644 --- a/go.mod +++ b/go.mod @@ -10,13 +10,13 @@ require ( github.com/xdg-go/scram v1.1.2 github.com/xdg-go/stringprep v1.0.4 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 - golang.org/x/crypto v0.28.0 - golang.org/x/sync v0.8.0 + golang.org/x/crypto v0.29.0 + golang.org/x/sync v0.9.0 ) require ( github.com/xdg-go/pbkdf2 v1.0.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/text v0.20.0 // indirect ) replace golang.org/x/net/http2 => golang.org/x/net/http2 v0.23.0 // GODRIVER-3225 diff --git a/go.sum b/go.sum index e4a93e189c..cea9f9e223 100644 --- a/go.sum +++ b/go.sum @@ -17,16 +17,16 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS 
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -38,8 +38,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/go.work b/go.work index d1dbf036ba..e508be5714 100644 --- a/go.work +++ b/go.work @@ -8,4 +8,5 @@ use ( ./internal/cmd/benchmark ./internal/cmd/compilecheck ./internal/cmd/faas/awslambda/mongodb + ./internal/test/goleak ) diff --git a/internal/cmd/benchmark/go.mod b/internal/cmd/benchmark/go.mod index f99d93f20c..6582dfd09c 100644 --- a/internal/cmd/benchmark/go.mod +++ b/internal/cmd/benchmark/go.mod @@ -18,9 +18,9 @@ require ( github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - 
golang.org/x/text v0.19.0 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/text v0.20.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/internal/cmd/benchmark/go.sum b/internal/cmd/benchmark/go.sum index a7b751018d..aa205ecc72 100644 --- a/internal/cmd/benchmark/go.sum +++ b/internal/cmd/benchmark/go.sum @@ -31,16 +31,16 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -52,8 +52,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/internal/cmd/compilecheck/go.mod b/internal/cmd/compilecheck/go.mod index bbc4aae937..46395b8c1e 100644 --- 
a/internal/cmd/compilecheck/go.mod +++ b/internal/cmd/compilecheck/go.mod @@ -15,7 +15,7 @@ require ( github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/text v0.20.0 // indirect ) diff --git a/internal/cmd/compilecheck/go.sum b/internal/cmd/compilecheck/go.sum index db8c667547..d2c1cbcd83 100644 --- a/internal/cmd/compilecheck/go.sum +++ b/internal/cmd/compilecheck/go.sum @@ -15,16 +15,16 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -36,8 +36,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/internal/cmd/faas/awslambda/mongodb/go.mod b/internal/cmd/faas/awslambda/mongodb/go.mod index a002c13046..5cbe9ad118 100644 --- a/internal/cmd/faas/awslambda/mongodb/go.mod +++ b/internal/cmd/faas/awslambda/mongodb/go.mod @@ -16,9 +16,9 @@ require ( github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/text v0.20.0 // indirect ) replace gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.2.8 diff --git a/internal/cmd/faas/awslambda/mongodb/go.sum b/internal/cmd/faas/awslambda/mongodb/go.sum index 0adaac0231..3f1d329c18 100644 --- a/internal/cmd/faas/awslambda/mongodb/go.sum +++ b/internal/cmd/faas/awslambda/mongodb/go.sum @@ -29,16 +29,16 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -50,8 +50,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod 
h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/internal/cmd/testatlas/atlas_test.go b/internal/cmd/testatlas/atlas_test.go index 466b0b4233..cf4a84735f 100644 --- a/internal/cmd/testatlas/atlas_test.go +++ b/internal/cmd/testatlas/atlas_test.go @@ -17,7 +17,6 @@ import ( "go.mongodb.org/mongo-driver/v2/bson" "go.mongodb.org/mongo-driver/v2/internal/handshake" - "go.mongodb.org/mongo-driver/v2/internal/mongoutil" "go.mongodb.org/mongo-driver/v2/mongo" "go.mongodb.org/mongo-driver/v2/mongo/options" ) @@ -46,12 +45,7 @@ func TestAtlas(t *testing.T) { t.Fatalf("error running test with TLS at index %d: %v", idx, err) } - args, err := mongoutil.NewOptions[options.ClientOptions](clientOpts) - if err != nil { - panic(fmt.Sprintf("failed to construct args from options: %v", err)) - } - - tlsConfigSkipVerify := args.TLSConfig + tlsConfigSkipVerify := clientOpts.TLSConfig tlsConfigSkipVerify.InsecureSkipVerify = true // Run the connectivity test with InsecureSkipVerify to ensure SNI is done correctly even if verification is @@ -66,7 +60,7 @@ func TestAtlas(t *testing.T) { t.Logf("Finished!") } -func runTest(ctx context.Context, clientOpts *options.ClientOptionsBuilder) error { +func runTest(ctx context.Context, clientOpts *options.ClientOptions) error { client, err := mongo.Connect(clientOpts) if err != nil { return fmt.Errorf("Connect error: %w", err) diff --git a/internal/integration/client_side_encryption_prose_test.go b/internal/integration/client_side_encryption_prose_test.go index 0620b9ff10..cb9ee2a89d 100644 --- a/internal/integration/client_side_encryption_prose_test.go +++ b/internal/integration/client_side_encryption_prose_test.go @@ -527,7 +527,7 @@ func TestClientSideEncryptionProse(t *testing.T) { tlsConfig["kmip"] = kmipConfig } - getBaseAutoEncryptionOpts := func() *options.AutoEncryptionOptionsBuilder { + getBaseAutoEncryptionOpts := func() *options.AutoEncryptionOptions { return options.AutoEncryption(). SetKmsProviders(fullKmsProvidersMap). SetKeyVaultNamespace(kvNamespace). @@ -537,7 +537,7 @@ func TestClientSideEncryptionProse(t *testing.T) { testCases := []struct { name string - aeo *options.AutoEncryptionOptionsBuilder + aeo *options.AutoEncryptionOptions schema bson.Raw // the schema to create the collection. if nil, the collection won't be explicitly created }{ {"remote schema", getBaseAutoEncryptionOpts(), corpusSchema}, @@ -1381,8 +1381,9 @@ func TestClientSideEncryptionProse(t *testing.T) { } }) - // These tests only run when 3 KMS HTTP servers and 1 KMS KMIP server are running. See specification for port numbers and necessary arguments: - // https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests + // These tests only run when 3 KMS HTTP servers and 1 KMS KMIP server are + // running. See specification for port numbers and necessary arguments: + // https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#10-kms-tls-tests mt.RunOpts("10. 
kms tls tests", noClientOpts, func(mt *mtest.T) { kmsTlsTestcase := os.Getenv("KMS_TLS_TESTCASE") if kmsTlsTestcase == "" { @@ -1436,12 +1437,17 @@ func TestClientSideEncryptionProse(t *testing.T) { } }) - // These tests only run when 3 KMS HTTP servers and 1 KMS KMIP server are running. See specification for port numbers and necessary arguments: - // https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests + // These tests only run when 3 KMS HTTP servers and 1 KMS KMIP server are + // running. See specification for port numbers and necessary arguments: + // https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#11-kms-tls-options-tests mt.RunOpts("11. kms tls options tests", noClientOpts, func(mt *mtest.T) { if os.Getenv("KMS_MOCK_SERVERS_RUNNING") == "" { mt.Skipf("Skipping test as KMS_MOCK_SERVERS_RUNNING is not set") } + if tlsCAFileKMIP == "" || tlsClientCertificateKeyFileKMIP == "" { + mt.Fatal("Env vars CSFLE_TLS_CA_FILE and CSFLE_TLS_CLIENT_CERT_FILE must be set") + } + validKmsProviders := map[string]map[string]interface{}{ "aws": { "accessKeyId": awsAccessKeyID, @@ -1511,50 +1517,50 @@ func TestClientSideEncryptionProse(t *testing.T) { SetKeyVaultNamespace(kvNamespace) // make TLS opts containing client certificate and CA file - tlsConfig := make(map[string]*tls.Config) - if tlsCAFileKMIP != "" && tlsClientCertificateKeyFileKMIP != "" { - clientAndCATlsMap := map[string]interface{}{ - "tlsCertificateKeyFile": tlsClientCertificateKeyFileKMIP, - "tlsCAFile": tlsCAFileKMIP, - } - certConfig, err := options.BuildTLSConfig(clientAndCATlsMap) - assert.Nil(mt, err, "BuildTLSConfig error: %v", err) - tlsConfig["aws"] = certConfig - tlsConfig["azure"] = certConfig - tlsConfig["gcp"] = certConfig - tlsConfig["kmip"] = certConfig - } + clientAndCATLSConfig, err := options.BuildTLSConfig(map[string]interface{}{ + "tlsCertificateKeyFile": tlsClientCertificateKeyFileKMIP, + "tlsCAFile": tlsCAFileKMIP, + }) + assert.Nil(mt, err, "BuildTLSConfig error: %v", err) // create valid Client Encryption options and set valid TLS options validClientEncryptionOptionsWithTLS := options.ClientEncryption(). SetKmsProviders(validKmsProviders). SetKeyVaultNamespace(kvNamespace). - SetTLSConfig(tlsConfig) + SetTLSConfig(map[string]*tls.Config{ + "aws": clientAndCATLSConfig, + "azure": clientAndCATLSConfig, + "gcp": clientAndCATLSConfig, + "kmip": clientAndCATLSConfig, + }) // make TLS opts containing only CA file - if tlsCAFileKMIP != "" { - caTlsMap := map[string]interface{}{ - "tlsCAFile": tlsCAFileKMIP, - } - certConfig, err := options.BuildTLSConfig(caTlsMap) - assert.Nil(mt, err, "BuildTLSConfig error: %v", err) - tlsConfig["aws"] = certConfig - tlsConfig["azure"] = certConfig - tlsConfig["gcp"] = certConfig - tlsConfig["kmip"] = certConfig - } + caTLSConfig, err := options.BuildTLSConfig(map[string]interface{}{ + "tlsCAFile": tlsCAFileKMIP, + }) + assert.Nil(mt, err, "BuildTLSConfig error: %v", err) // create invalid Client Encryption options with expired credentials expiredClientEncryptionOptions := options.ClientEncryption(). SetKmsProviders(expiredKmsProviders). SetKeyVaultNamespace(kvNamespace). - SetTLSConfig(tlsConfig) + SetTLSConfig(map[string]*tls.Config{ + "aws": caTLSConfig, + "azure": caTLSConfig, + "gcp": caTLSConfig, + "kmip": caTLSConfig, + }) // create invalid Client Encryption options with invalid hostnames invalidHostnameClientEncryptionOptions := options.ClientEncryption(). 
SetKmsProviders(invalidKmsProviders). SetKeyVaultNamespace(kvNamespace). - SetTLSConfig(tlsConfig) + SetTLSConfig(map[string]*tls.Config{ + "aws": caTLSConfig, + "azure": caTLSConfig, + "gcp": caTLSConfig, + "kmip": caTLSConfig, + }) awsMasterKeyNoClientCert := map[string]interface{}{ "region": "us-east-1", @@ -1620,7 +1626,8 @@ func TestClientSideEncryptionProse(t *testing.T) { possibleErrors := []string{ "x509: certificate signed by unknown authority", // Windows - "x509: “valid.testing.golang.invalid” certificate is not trusted", // MacOS + "x509: “valid.testing.golang.invalid” certificate is not trusted", // macOS + "x509: “server” certificate is not standards compliant", // macOS "x509: certificate is not authorized to sign other certificates", // All others } @@ -3007,7 +3014,7 @@ type cseProseTest struct { cseStarted []*event.CommandStartedEvent } -func setup(mt *mtest.T, aeo *options.AutoEncryptionOptionsBuilder, kvClientOpts options.Lister[options.ClientOptions], +func setup(mt *mtest.T, aeo *options.AutoEncryptionOptions, kvClientOpts *options.ClientOptions, ceo options.Lister[options.ClientEncryptionOptions]) *cseProseTest { mt.Helper() var cpt cseProseTest @@ -3086,7 +3093,7 @@ func rawValueToCoreValue(rv bson.RawValue) bsoncore.Value { type deadlockTest struct { clientTest *mongo.Client - clientKeyVaultOpts *options.ClientOptionsBuilder + clientKeyVaultOpts *options.ClientOptions clientKeyVaultEvents []startedEvent clientEncryption *mongo.ClientEncryption ciphertext bson.Binary diff --git a/internal/integration/client_side_encryption_test.go b/internal/integration/client_side_encryption_test.go index cd49e2bf00..84708601bc 100644 --- a/internal/integration/client_side_encryption_test.go +++ b/internal/integration/client_side_encryption_test.go @@ -356,11 +356,7 @@ func TestClientSideEncryptionCustomCrypt(t *testing.T) { ApplyURI(mtest.ClusterURI()). SetAutoEncryptionOptions(aeOpts) cc := &customCrypt{} - clientOpts.Opts = append(clientOpts.Opts, func(args *options.ClientOptions) error { - args.Crypt = cc - - return nil - }) + clientOpts.Crypt = cc integtest.AddTestServerAPIVersion(clientOpts) client, err := mongo.Connect(clientOpts) @@ -683,11 +679,7 @@ func TestFLEIndexView(t *testing.T) { SetReadPreference(mtest.PrimaryRp) cc := &customCrypt{} - opts.Opts = append(opts.Opts, func(args *options.ClientOptions) error { - args.Crypt = cc - - return nil - }) + opts.Crypt = cc integtest.AddTestServerAPIVersion(opts) diff --git a/internal/integration/collection_test.go b/internal/integration/collection_test.go index 056a527768..c0a3bfedc5 100644 --- a/internal/integration/collection_test.go +++ b/internal/integration/collection_test.go @@ -1165,6 +1165,7 @@ func TestCollection(t *testing.T) { SetHint(indexName). SetMax(bson.D{{"x", int32(5)}}). SetMin(bson.D{{"x", int32(0)}}). + SetOplogReplay(false). SetProjection(bson.D{{"x", int32(1)}}). SetReturnKey(false). SetShowRecordID(false). @@ -1186,6 +1187,7 @@ func TestCollection(t *testing.T) { AppendString("hint", indexName). StartDocument("max").AppendInt32("x", 5).FinishDocument(). StartDocument("min").AppendInt32("x", 0).FinishDocument(). + AppendBoolean("oplogReplay", false). StartDocument("projection").AppendInt32("x", 1).FinishDocument(). AppendBoolean("returnKey", false). AppendBoolean("showRecordId", false). 
diff --git a/internal/integration/data_lake_test.go b/internal/integration/data_lake_test.go index 6c10f9a9e6..23ef8da033 100644 --- a/internal/integration/data_lake_test.go +++ b/internal/integration/data_lake_test.go @@ -97,7 +97,7 @@ func TestAtlasDataLake(t *testing.T) { }) } -func getBaseClientOptions(mt *mtest.T) *options.ClientOptionsBuilder { +func getBaseClientOptions(mt *mtest.T) *options.ClientOptions { mt.Helper() hosts, err := mongoutil.HostsFromURI(mtest.ClusterURI()) diff --git a/internal/integration/handshake_test.go b/internal/integration/handshake_test.go index a11339981d..f4c449e30e 100644 --- a/internal/integration/handshake_test.go +++ b/internal/integration/handshake_test.go @@ -18,6 +18,7 @@ import ( "go.mongodb.org/mongo-driver/v2/internal/handshake" "go.mongodb.org/mongo-driver/v2/internal/integration/mtest" "go.mongodb.org/mongo-driver/v2/internal/require" + "go.mongodb.org/mongo-driver/v2/mongo/options" "go.mongodb.org/mongo-driver/v2/version" "go.mongodb.org/mongo-driver/v2/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/v2/x/mongo/driver/wiremessage" @@ -34,11 +35,23 @@ func TestHandshakeProse(t *testing.T) { CreateCollection(false). ClientType(mtest.Proxy) - clientMetadata := func(env bson.D) bson.D { + clientMetadata := func(env bson.D, info *options.DriverInfo) bson.D { + var ( + driverName = "mongo-go-driver" + driverVersion = version.Driver + platform = runtime.Version() + ) + + if info != nil { + driverName = driverName + "|" + info.Name + driverVersion = driverVersion + "|" + info.Version + platform = platform + "|" + info.Platform + } + elems := bson.D{ {Key: "driver", Value: bson.D{ - {Key: "name", Value: "mongo-go-driver"}, - {Key: "version", Value: version.Driver}, + {Key: "name", Value: driverName}, + {Key: "version", Value: driverVersion}, }}, {Key: "os", Value: bson.D{ {Key: "type", Value: runtime.GOOS}, @@ -46,7 +59,7 @@ func TestHandshakeProse(t *testing.T) { }}, } - elems = append(elems, bson.E{Key: "platform", Value: runtime.Version()}) + elems = append(elems, bson.E{Key: "platform", Value: platform}) // If env is empty, don't include it in the metadata. if env != nil && !reflect.DeepEqual(env, bson.D{}) { @@ -56,6 +69,12 @@ func TestHandshakeProse(t *testing.T) { return elems } + driverInfo := &options.DriverInfo{ + Name: "outer-library-name", + Version: "outer-library-version", + Platform: "outer-library-platform", + } + // Reset the environment variables to avoid environment namespace // collision. t.Setenv("AWS_EXECUTION_ENV", "") @@ -72,6 +91,7 @@ func TestHandshakeProse(t *testing.T) { for _, test := range []struct { name string env map[string]string + opts *options.ClientOptions want bson.D }{ { @@ -81,20 +101,22 @@ func TestHandshakeProse(t *testing.T) { "AWS_REGION": "us-east-2", "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", }, + opts: nil, want: clientMetadata(bson.D{ {Key: "name", Value: "aws.lambda"}, {Key: "memory_mb", Value: 1024}, {Key: "region", Value: "us-east-2"}, - }), + }, nil), }, { name: "2. valid Azure", env: map[string]string{ "FUNCTIONS_WORKER_RUNTIME": "node", }, + opts: nil, want: clientMetadata(bson.D{ {Key: "name", Value: "azure.func"}, - }), + }, nil), }, { name: "3. valid GCP", @@ -104,12 +126,13 @@ func TestHandshakeProse(t *testing.T) { "FUNCTION_TIMEOUT_SEC": "60", "FUNCTION_REGION": "us-central1", }, + opts: nil, want: clientMetadata(bson.D{ {Key: "name", Value: "gcp.func"}, {Key: "memory_mb", Value: 1024}, {Key: "region", Value: "us-central1"}, {Key: "timeout_sec", Value: 60}, - }), + }, nil), }, { name: "4. 
valid Vercel", @@ -117,10 +140,11 @@ func TestHandshakeProse(t *testing.T) { "VERCEL": "1", "VERCEL_REGION": "cdg1", }, + opts: nil, want: clientMetadata(bson.D{ {Key: "name", Value: "vercel"}, {Key: "region", Value: "cdg1"}, - }), + }, nil), }, { name: "5. invalid multiple providers", @@ -128,7 +152,8 @@ func TestHandshakeProse(t *testing.T) { "AWS_EXECUTION_ENV": "AWS_Lambda_java8", "FUNCTIONS_WORKER_RUNTIME": "node", }, - want: clientMetadata(nil), + opts: nil, + want: clientMetadata(nil, nil), }, { name: "6. invalid long string", @@ -142,9 +167,10 @@ func TestHandshakeProse(t *testing.T) { return s }(), }, + opts: nil, want: clientMetadata(bson.D{ {Key: "name", Value: "aws.lambda"}, - }), + }, nil), }, { name: "7. invalid wrong types", @@ -152,16 +178,23 @@ func TestHandshakeProse(t *testing.T) { "AWS_EXECUTION_ENV": "AWS_Lambda_java8", "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big", }, + opts: nil, want: clientMetadata(bson.D{ {Key: "name", Value: "aws.lambda"}, - }), + }, nil), }, { name: "8. Invalid - AWS_EXECUTION_ENV does not start with \"AWS_Lambda_\"", env: map[string]string{ "AWS_EXECUTION_ENV": "EC2", }, - want: clientMetadata(nil), + opts: nil, + want: clientMetadata(nil, nil), + }, + { + name: "driver info included", + opts: options.Client().SetDriverInfo(driverInfo), + want: clientMetadata(nil, driverInfo), }, } { test := test @@ -171,6 +204,10 @@ func TestHandshakeProse(t *testing.T) { mt.Setenv(k, v) } + if test.opts != nil { + mt.ResetClient(test.opts) + } + // Ping the server to ensure the handshake has completed. err := mt.Client.Ping(context.Background(), nil) require.NoError(mt, err, "Ping error: %v", err) diff --git a/internal/integration/initial_dns_seedlist_discovery_test.go b/internal/integration/initial_dns_seedlist_discovery_test.go index de9d44a058..24d1d5911e 100644 --- a/internal/integration/initial_dns_seedlist_discovery_test.go +++ b/internal/integration/initial_dns_seedlist_discovery_test.go @@ -74,7 +74,7 @@ func runSeedlistDiscoveryDirectory(mt *mtest.T, subdirectory string) { } // runSeedlistDiscoveryPingTest will create a new connection using the test URI and attempt to "ping" the server. 
-func runSeedlistDiscoveryPingTest(mt *mtest.T, clientOpts *options.ClientOptionsBuilder) { +func runSeedlistDiscoveryPingTest(mt *mtest.T, clientOpts *options.ClientOptions) { ctx := context.Background() client, err := mongo.Connect(clientOpts) diff --git a/internal/integration/json_helpers_test.go b/internal/integration/json_helpers_test.go index 51c46d546b..e3ccb5254e 100644 --- a/internal/integration/json_helpers_test.go +++ b/internal/integration/json_helpers_test.go @@ -67,7 +67,7 @@ func jsonFilesInDir(t testing.TB, dir string) []string { } // create client options from a map -func createClientOptions(t testing.TB, opts bson.Raw) *options.ClientOptionsBuilder { +func createClientOptions(t testing.TB, opts bson.Raw) *options.ClientOptions { t.Helper() clientOpts := options.Client() @@ -125,7 +125,7 @@ func createClientOptions(t testing.TB, opts bson.Raw) *options.ClientOptionsBuil return clientOpts } -func createAutoEncryptionOptions(t testing.TB, opts bson.Raw) *options.AutoEncryptionOptionsBuilder { +func createAutoEncryptionOptions(t testing.TB, opts bson.Raw) *options.AutoEncryptionOptions { t.Helper() aeo := options.AutoEncryption() diff --git a/internal/integration/mtest/mongotest.go b/internal/integration/mtest/mongotest.go index be1da51aeb..3967bf7f82 100644 --- a/internal/integration/mtest/mongotest.go +++ b/internal/integration/mtest/mongotest.go @@ -81,7 +81,7 @@ type T struct { // options copied to sub-tests clientType ClientType - clientOpts *options.ClientOptionsBuilder + clientOpts *options.ClientOptions collOpts *options.CollectionOptionsBuilder shareClient *bool @@ -359,7 +359,7 @@ func (t *T) ClearEvents() { // If t.Coll is not-nil, it will be reset to use the new client. Should only be called if the existing client is // not nil. This will Disconnect the existing client but will not drop existing collections. To do so, ClearCollections // must be called before calling ResetClient. 
-func (t *T) ResetClient(opts *options.ClientOptionsBuilder) { +func (t *T) ResetClient(opts *options.ClientOptions) { if opts != nil { t.clientOpts = opts } @@ -592,18 +592,13 @@ func (t *T) createTestClient() { clientOpts = options.Client().SetWriteConcern(MajorityWc).SetReadPreference(PrimaryRp) } - args, err := mongoutil.NewOptions[options.ClientOptions](clientOpts) - if err != nil { - t.Fatalf("failed to construct options from builder: %v", err) - } - // set ServerAPIOptions to latest version if required - if args.Deployment == nil && t.clientType != Mock && args.ServerAPIOptions == nil && testContext.requireAPIVersion { + if clientOpts.Deployment == nil && t.clientType != Mock && clientOpts.ServerAPIOptions == nil && testContext.requireAPIVersion { clientOpts.SetServerAPIOptions(options.ServerAPI(driver.TestServerAPIVersion)) } // Setup command monitor - var customMonitor = args.Monitor + var customMonitor = clientOpts.Monitor clientOpts.SetMonitor(&event.CommandMonitor{ Started: func(ctx context.Context, cse *event.CommandStartedEvent) { if customMonitor != nil && customMonitor.Started != nil { @@ -631,8 +626,8 @@ func (t *T) createTestClient() { }, }) // only specify connection pool monitor if no deployment is given - if args.Deployment == nil { - previousPoolMonitor := args.PoolMonitor + if clientOpts.Deployment == nil { + previousPoolMonitor := clientOpts.PoolMonitor clientOpts.SetPoolMonitor(&event.PoolMonitor{ Event: func(evt *event.PoolEvent) { @@ -650,6 +645,7 @@ func (t *T) createTestClient() { }) } + var err error switch t.clientType { case Pinned: // pin to first mongos @@ -658,15 +654,13 @@ func (t *T) createTestClient() { t.Client, err = mongo.Connect(uriOpts, clientOpts) case Mock: // clear pool monitor to avoid configuration error - args, _ = mongoutil.NewOptions[options.ClientOptions](clientOpts) - args.PoolMonitor = nil + clientOpts.PoolMonitor = nil t.mockDeployment = drivertest.NewMockDeployment() - args.Deployment = t.mockDeployment + clientOpts.Deployment = t.mockDeployment - opts := mongoutil.NewOptionsLister(args, nil) - t.Client, err = mongo.Connect(opts) + t.Client, err = mongo.Connect(clientOpts) case Proxy: t.proxyDialer = newProxyDialer() clientOpts.SetDialer(t.proxyDialer) @@ -676,8 +670,8 @@ func (t *T) createTestClient() { case Default: // Use a different set of options to specify the URI because clientOpts may already have a URI or host seedlist // specified. - var uriOpts *options.ClientOptionsBuilder - if args.Deployment == nil { + var uriOpts *options.ClientOptions + if clientOpts.Deployment == nil { // Only specify URI if the deployment is not set to avoid setting topology/server options along with the // deployment. uriOpts = options.Client().ApplyURI(testContext.connString.Original) diff --git a/internal/integration/mtest/options.go b/internal/integration/mtest/options.go index 4635ec267c..aff188b481 100644 --- a/internal/integration/mtest/options.go +++ b/internal/integration/mtest/options.go @@ -134,7 +134,7 @@ func (op *Options) CollectionOptions(opts *options.CollectionOptionsBuilder) *Op } // ClientOptions sets the options to use when creating a client for a test. 
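The updated mtest signatures can be exercised as sketched below, written as it might appear inside the driver's own integration test tree; mtest.NewOptions and mt.Run are assumed to be unchanged here, with only the option types flattened.

package integration

import (
	"testing"

	"go.mongodb.org/mongo-driver/v2/internal/integration/mtest"
	"go.mongodb.org/mongo-driver/v2/mongo/options"
)

// TestFlattenedClientOptions sketches passing *options.ClientOptions to the
// per-test Options.ClientOptions helper and to T.ResetClient.
func TestFlattenedClientOptions(t *testing.T) {
	mtOpts := mtest.NewOptions().ClientOptions(
		options.Client().SetRetryWrites(false))
	mt := mtest.New(t, mtOpts)

	mt.Run("reset with new options", func(mt *mtest.T) {
		mt.ResetClient(options.Client().SetAppName("example-app"))
	})
}
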
-func (op *Options) ClientOptions(opts *options.ClientOptionsBuilder) *Options { +func (op *Options) ClientOptions(opts *options.ClientOptions) *Options { op.optFuncs = append(op.optFuncs, func(t *T) { t.clientOpts = opts }) diff --git a/internal/integration/mtest/setup.go b/internal/integration/mtest/setup.go index 38fe5b84bf..d4f181f8c7 100644 --- a/internal/integration/mtest/setup.go +++ b/internal/integration/mtest/setup.go @@ -18,7 +18,6 @@ import ( "go.mongodb.org/mongo-driver/v2/bson" "go.mongodb.org/mongo-driver/v2/internal/integtest" - "go.mongodb.org/mongo-driver/v2/internal/mongoutil" "go.mongodb.org/mongo-driver/v2/mongo" "go.mongodb.org/mongo-driver/v2/mongo/options" "go.mongodb.org/mongo-driver/v2/mongo/readpref" @@ -58,20 +57,15 @@ var testContext struct { serverless bool } -func setupClient(opts *options.ClientOptionsBuilder) (*mongo.Client, error) { - args, err := mongoutil.NewOptions[options.ClientOptions](opts) - if err != nil { - return nil, fmt.Errorf("failed to construct options from builder: %w", err) - } - +func setupClient(opts *options.ClientOptions) (*mongo.Client, error) { wcMajority := writeconcern.Majority() // set ServerAPIOptions to latest version if required - if args.ServerAPIOptions == nil && testContext.requireAPIVersion { + if opts.ServerAPIOptions == nil && testContext.requireAPIVersion { opts.SetServerAPIOptions(options.ServerAPI(driver.TestServerAPIVersion)) } // for sharded clusters, pin to one host. Due to how the cache is implemented on 4.0 and 4.2, behavior // can be inconsistent when multiple mongoses are used - return mongo.Connect(opts.SetWriteConcern(wcMajority).SetHosts(args.Hosts[:1])) + return mongo.Connect(opts.SetWriteConcern(wcMajority).SetHosts(opts.Hosts[:1])) } // Setup initializes the current testing context. diff --git a/internal/integration/sdam_error_handling_test.go b/internal/integration/sdam_error_handling_test.go index ceaa5a59f8..9668f52bb6 100644 --- a/internal/integration/sdam_error_handling_test.go +++ b/internal/integration/sdam_error_handling_test.go @@ -27,7 +27,7 @@ import ( func TestSDAMErrorHandling(t *testing.T) { mt := mtest.New(t, noClientOpts) - baseClientOpts := func() *options.ClientOptionsBuilder { + baseClientOpts := func() *options.ClientOptions { return options.Client(). ApplyURI(mtest.ClusterURI()). SetRetryWrites(false). 
diff --git a/internal/integration/server_selection_prose_test.go b/internal/integration/server_selection_prose_test.go index 05641dbf75..8bed374785 100644 --- a/internal/integration/server_selection_prose_test.go +++ b/internal/integration/server_selection_prose_test.go @@ -105,7 +105,7 @@ func runsServerSelection(mt *mtest.T, monitor *eventtest.TestPoolMonitor, } // TestServerSelectionProse implements the Server Selection prose tests: -// https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection-tests.rst +// https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection-tests.md func TestServerSelectionProse(t *testing.T) { const maxPoolSize = 10 const localThreshold = 30 * time.Second diff --git a/internal/integration/unified/client_entity.go b/internal/integration/unified/client_entity.go index 5c9dc88554..08b9fd7866 100644 --- a/internal/integration/unified/client_entity.go +++ b/internal/integration/unified/client_entity.go @@ -20,7 +20,6 @@ import ( "go.mongodb.org/mongo-driver/v2/internal/integration/mtest" "go.mongodb.org/mongo-driver/v2/internal/integtest" "go.mongodb.org/mongo-driver/v2/internal/logger" - "go.mongodb.org/mongo-driver/v2/internal/mongoutil" "go.mongodb.org/mongo-driver/v2/mongo" "go.mongodb.org/mongo-driver/v2/mongo/options" "go.mongodb.org/mongo-driver/v2/mongo/readconcern" @@ -184,15 +183,10 @@ func newClientEntity(ctx context.Context, em *EntityMap, entityOptions *entityOp } } if entityOptions.ServerAPIOptions != nil { - args, err := mongoutil.NewOptions[options.ServerAPIOptions](entityOptions.ServerAPIOptions) - if err != nil { - return nil, fmt.Errorf("failed to construct options from builder: %w", err) - } - - if err := args.ServerAPIVersion.Validate(); err != nil { + if err := entityOptions.ServerAPIOptions.ServerAPIVersion.Validate(); err != nil { return nil, err } - clientOpts.SetServerAPIOptions(entityOptions.ServerAPIOptions.ServerAPIOptionsBuilder) + clientOpts.SetServerAPIOptions(entityOptions.ServerAPIOptions.ServerAPIOptions) } else { integtest.AddTestServerAPIVersion(clientOpts) } @@ -589,7 +583,7 @@ func (c *clientEntity) getRecordEvents() bool { return c.recordEvents.Load().(bool) } -func setClientOptionsFromURIOptions(clientOpts *options.ClientOptionsBuilder, uriOpts bson.M) error { +func setClientOptionsFromURIOptions(clientOpts *options.ClientOptions, uriOpts bson.M) error { // A write concern can be constructed across multiple URI options (e.g. "w", "j", and "wTimeoutMS") so we declare an // empty writeConcern instance here that can be populated in the loop below. 
var wc writeConcern @@ -654,7 +648,7 @@ func setClientOptionsFromURIOptions(clientOpts *options.ClientOptionsBuilder, ur return nil } -func evaluateUseMultipleMongoses(clientOpts *options.ClientOptionsBuilder, useMultipleMongoses bool) error { +func evaluateUseMultipleMongoses(clientOpts *options.ClientOptions, useMultipleMongoses bool) error { hosts := mtest.ClusterConnString().Hosts if !useMultipleMongoses { diff --git a/internal/integration/unified/collection_operation_execution.go b/internal/integration/unified/collection_operation_execution.go index 2060db01e3..5617e5eb6b 100644 --- a/internal/integration/unified/collection_operation_execution.go +++ b/internal/integration/unified/collection_operation_execution.go @@ -523,6 +523,8 @@ func executeDistinct(ctx context.Context, operation *operation) (*operationResul val := elem.Value() switch key { + case "hint": + opts.SetHint(val) case "collation": collation, err := createCollation(val.Document()) if err != nil { @@ -1469,6 +1471,8 @@ func createFindCursor(ctx context.Context, operation *operation) (*cursorResult, opts.SetMin(val.Document()) case "noCursorTimeout": opts.SetNoCursorTimeout(val.Boolean()) + case "oplogReplay": + opts.SetOplogReplay(val.Boolean()) case "projection": opts.SetProjection(val.Document()) case "returnKey": diff --git a/internal/integration/unified/server_api_options.go b/internal/integration/unified/server_api_options.go index 72e24496de..e9379386d2 100644 --- a/internal/integration/unified/server_api_options.go +++ b/internal/integration/unified/server_api_options.go @@ -16,7 +16,7 @@ import ( // serverAPIOptions is a wrapper for *options.ServerAPIOptions. This type implements the bson.Unmarshaler interface // to convert BSON documents to a serverAPIOptions instance. type serverAPIOptions struct { - *options.ServerAPIOptionsBuilder + *options.ServerAPIOptions } type serverAPIVersion = options.ServerAPIVersion @@ -37,7 +37,7 @@ func (s *serverAPIOptions) UnmarshalBSON(data []byte) error { return fmt.Errorf("unrecognized fields for serverAPIOptions: %v", mapKeys(temp.Extra)) } - s.ServerAPIOptionsBuilder = options.ServerAPI(temp.ServerAPIVersion) + s.ServerAPIOptions = options.ServerAPI(temp.ServerAPIVersion) if temp.DeprecationErrors != nil { s.SetDeprecationErrors(*temp.DeprecationErrors) } diff --git a/internal/integration/unified/unified_spec_test.go b/internal/integration/unified/unified_spec_test.go index 8871a48127..eba98345f4 100644 --- a/internal/integration/unified/unified_spec_test.go +++ b/internal/integration/unified/unified_spec_test.go @@ -25,6 +25,7 @@ var ( "command-monitoring/logging", "connection-monitoring-and-pooling/logging", "sessions", + "retryable-reads/unified", "retryable-writes/unified", "client-side-encryption/unified", "client-side-operations-timeout", diff --git a/internal/integration/unified_spec_test.go b/internal/integration/unified_spec_test.go index 6b37dc254c..2406426d9c 100644 --- a/internal/integration/unified_spec_test.go +++ b/internal/integration/unified_spec_test.go @@ -27,8 +27,6 @@ import ( "go.mongodb.org/mongo-driver/v2/internal/failpoint" "go.mongodb.org/mongo-driver/v2/internal/integration/mtest" "go.mongodb.org/mongo-driver/v2/internal/integtest" - "go.mongodb.org/mongo-driver/v2/internal/mongoutil" - "go.mongodb.org/mongo-driver/v2/internal/require" "go.mongodb.org/mongo-driver/v2/mongo" "go.mongodb.org/mongo-driver/v2/mongo/address" "go.mongodb.org/mongo-driver/v2/mongo/options" @@ -178,7 +176,7 @@ const dataPath string = "../../testdata/" var directories = 
[]string{ "transactions/legacy", "convenient-transactions", - "retryable-reads", + "retryable-reads/legacy", "read-write-concern/operation", "server-discovery-and-monitoring/integration", "atlas-data-lake-testing", @@ -287,38 +285,24 @@ func runSpecTestCase(mt *mtest.T, test *testCase, testFile testFile) { // Reset the client using the client options specified in the test. testClientOpts := createClientOptions(mt, test.ClientOptions) - args, err := mongoutil.NewOptions[options.ClientOptions](testClientOpts) - require.NoError(mt, err, "failed to construct options from builder") - // If AutoEncryptionOptions is set and AutoEncryption isn't disabled (neither // bypassAutoEncryption nor bypassQueryAnalysis are true), then add extra options to load // the crypt_shared library. - if args.AutoEncryptionOptions != nil { - aeArgs, err := mongoutil.NewOptions[options.AutoEncryptionOptions](args.AutoEncryptionOptions) - require.NoError(mt, err, "failed to construct options from builder") + if testClientOpts.AutoEncryptionOptions != nil { + aeOpts := testClientOpts.AutoEncryptionOptions - bypassAutoEncryption := aeArgs.BypassAutoEncryption != nil && *aeArgs.BypassAutoEncryption - bypassQueryAnalysis := aeArgs.BypassQueryAnalysis != nil && *aeArgs.BypassQueryAnalysis + bypassAutoEncryption := aeOpts.BypassAutoEncryption != nil && *aeOpts.BypassAutoEncryption + bypassQueryAnalysis := aeOpts.BypassQueryAnalysis != nil && *aeOpts.BypassQueryAnalysis if !bypassAutoEncryption && !bypassQueryAnalysis { - if aeArgs.ExtraOptions == nil { - aeArgs.ExtraOptions = make(map[string]interface{}) + if aeOpts.ExtraOptions == nil { + aeOpts.ExtraOptions = make(map[string]interface{}) } for k, v := range getCryptSharedLibExtraOptions() { - aeArgs.ExtraOptions[k] = v + aeOpts.ExtraOptions[k] = v } } - - args.AutoEncryptionOptions = &options.AutoEncryptionOptionsBuilder{ - Opts: []func(*options.AutoEncryptionOptions) error{ - func(args *options.AutoEncryptionOptions) error { - *args = *aeArgs - - return nil - }, - }, - } } test.monitor = newUnifiedRunnerEventMonitor() @@ -326,7 +310,7 @@ func runSpecTestCase(mt *mtest.T, test *testCase, testFile testFile) { Event: test.monitor.handlePoolEvent, }) testClientOpts.SetServerMonitor(test.monitor.sdamMonitor) - if args.HeartbeatInterval == nil { + if testClientOpts.HeartbeatInterval == nil { // If one isn't specified in the test, use a low heartbeat frequency so the Client will quickly recover when // using failpoints that cause SDAM state changes. testClientOpts.SetHeartbeatInterval(defaultHeartbeatInterval) diff --git a/internal/integtest/integtest.go b/internal/integtest/integtest.go index 2f09c4dc30..37ab6e4d77 100644 --- a/internal/integtest/integtest.go +++ b/internal/integtest/integtest.go @@ -78,7 +78,7 @@ func AddCompressorToURI(uri string) string { } // AddTestServerAPIVersion adds the latest server API version in a ServerAPIOptions to passed-in opts. -func AddTestServerAPIVersion(opts *options.ClientOptionsBuilder) { +func AddTestServerAPIVersion(opts *options.ClientOptions) { if os.Getenv("REQUIRE_API_VERSION") == "true" { opts.SetServerAPIOptions(options.ServerAPI(driver.TestServerAPIVersion)) } diff --git a/internal/mongoutil/mongoutil.go b/internal/mongoutil/mongoutil.go index f42dfd3da4..0345b96e8f 100644 --- a/internal/mongoutil/mongoutil.go +++ b/internal/mongoutil/mongoutil.go @@ -71,21 +71,15 @@ func NewOptionsLister[T any](args *T, callback func(*T) error) *OptionsLister[T] // AuthFromURI will create a Credentials object given the provided URI. 
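The simplified helpers below work because ApplyURI parses the connection string eagerly and exposes the result through the exported ClientOptions fields; a minimal sketch of reading those fields directly in user code:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/v2/mongo/options"
)

func main() {
	opts := options.Client().
		ApplyURI("mongodb://alice:secret@host1:27017,host2:27017/?replicaSet=rs0")

	fmt.Println(opts.Hosts) // [host1:27017 host2:27017]

	// Auth stays nil when the URI carries no userinfo component.
	if opts.Auth != nil {
		fmt.Println(opts.Auth.Username) // alice
	}
}
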
func AuthFromURI(uri string) (*options.Credential, error) { - args, err := NewOptions[options.ClientOptions](options.Client().ApplyURI(uri)) - if err != nil { - return nil, err - } + opts := options.Client().ApplyURI(uri) - return args.Auth, nil + return opts.Auth, nil } // HostsFromURI will parse the hosts in the URI and return them as a slice of // strings. func HostsFromURI(uri string) ([]string, error) { - args, err := NewOptions[options.ClientOptions](options.Client().ApplyURI(uri)) - if err != nil { - return nil, err - } + opts := options.Client().ApplyURI(uri) - return args.Hosts, nil + return opts.Hosts, nil } diff --git a/internal/mongoutil/mongoutil_test.go b/internal/mongoutil/mongoutil_test.go index 522d47ce3e..661ee5f5bb 100644 --- a/internal/mongoutil/mongoutil_test.go +++ b/internal/mongoutil/mongoutil_test.go @@ -9,161 +9,13 @@ package mongoutil import ( "strings" "testing" - "time" - "go.mongodb.org/mongo-driver/v2/internal/assert" - "go.mongodb.org/mongo-driver/v2/internal/ptrutil" "go.mongodb.org/mongo-driver/v2/mongo/options" ) -func TestNewOptions(t *testing.T) { - t.Parallel() - - // For simplicity, we just chose one options type to test on. This should be - // WLOG since (1) a user cannot merge mixed options, and (2) exported data in - // the options package cannot be backwards breaking. If - // options-package-specific functionality needs to be tested, it should be - // done in a separate test. - clientTests := []struct { - name string - opts []options.Lister[options.ClientOptions] - want options.ClientOptions - }{ - { - name: "nil options", - opts: nil, - want: options.ClientOptions{}, - }, - { - name: "no options", - opts: []options.Lister[options.ClientOptions]{}, - want: options.ClientOptions{}, - }, - { - name: "one option", - opts: []options.Lister[options.ClientOptions]{ - options.Client().SetAppName("testApp"), - }, - want: options.ClientOptions{AppName: ptrutil.Ptr[string]("testApp")}, - }, - { - name: "one nil option", - opts: []options.Lister[options.ClientOptions]{nil}, - want: options.ClientOptions{}, - }, - { - name: "many same options", - opts: []options.Lister[options.ClientOptions]{ - options.Client().SetAppName("testApp"), - options.Client().SetAppName("testApp"), - }, - want: options.ClientOptions{AppName: ptrutil.Ptr[string]("testApp")}, - }, - { - name: "many different options (last one wins)", - opts: []options.Lister[options.ClientOptions]{ - options.Client().SetAppName("testApp1"), - options.Client().SetAppName("testApp2"), - }, - want: options.ClientOptions{AppName: ptrutil.Ptr[string]("testApp2")}, - }, - { - name: "many nil options", - opts: []options.Lister[options.ClientOptions]{nil, nil}, - want: options.ClientOptions{}, - }, - { - name: "many options where last is nil (non-nil wins)", - opts: []options.Lister[options.ClientOptions]{ - options.Client().SetAppName("testApp"), - nil, - }, - want: options.ClientOptions{AppName: ptrutil.Ptr[string]("testApp")}, - }, - { - name: "many nil options where first is nil (non-nil wins)", - opts: []options.Lister[options.ClientOptions]{ - nil, - options.Client().SetAppName("testApp"), - }, - want: options.ClientOptions{AppName: ptrutil.Ptr[string]("testApp")}, - }, - { - name: "many nil options where middle is non-nil (non-nil wins)", - opts: []options.Lister[options.ClientOptions]{ - nil, - options.Client().SetAppName("testApp"), - nil, - }, - want: options.ClientOptions{AppName: ptrutil.Ptr[string]("testApp")}, - }, - } - - for _, test := range clientTests { - test := test - - t.Run(test.name, 
func(t *testing.T) { - t.Parallel() - - got, err := NewOptions[options.ClientOptions](test.opts...) - assert.NoError(t, err) - - // WLOG it should be enough to test a small subset of arguments. - assert.Equal(t, test.want.AppName, got.AppName) - }) - } -} - -func TestNewOptionsLister(t *testing.T) { - t.Parallel() - - // For simplicity, we just chose one options type to test on. This should be - // WLOG since (1) a user cannot merge mixed options, and (2) exported data in - // the options package cannot be backwards breaking. If - // options-package-specific functionality needs to be tested, it should be - // done in a separate test. - clientTests := []struct { - name string - args *options.ClientOptions - want options.ClientOptions - }{ - { - name: "nil args", - args: nil, - want: options.ClientOptions{}, - }, - { - name: "no args", - args: &options.ClientOptions{}, - want: options.ClientOptions{}, - }, - { - name: "args", - args: &options.ClientOptions{AppName: ptrutil.Ptr[string]("testApp")}, - want: options.ClientOptions{AppName: ptrutil.Ptr[string]("testApp")}, - }, - } - - for _, test := range clientTests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - opts := NewOptionsLister(test.args, nil) - - got, err := NewOptions[options.ClientOptions](opts) - assert.NoError(t, err) - - // WLOG it should be enough to test a small subset of arguments. - assert.Equal(t, test.want.AppName, got.AppName) - }) - } -} - func BenchmarkNewOptions(b *testing.B) { b.Run("reflect.ValueOf is always called", func(b *testing.B) { - opts := make([]options.Lister[options.ClientOptions], b.N) + opts := make([]options.Lister[options.FindOptions], b.N) // Create a huge string to see if we can force reflect.ValueOf to use heap // over stack. @@ -171,28 +23,12 @@ func BenchmarkNewOptions(b *testing.B) { str := strings.Repeat("a", size) for i := 0; i < b.N; i++ { - opts[i] = options.Client().ApplyURI("x").SetAppName(str). - SetAuth(options.Credential{}).SetHosts([]string{"x", "y"}). - SetDirect(true).SetTimeout(time.Second) - } - - b.ReportAllocs() - for i := 0; i < b.N; i++ { - _, _ = NewOptions[options.ClientOptions](opts...) - } - }) - - b.Run("reflect.ValuOf is never called", func(b *testing.B) { - opts := make([]options.Lister[options.LoggerOptions], b.N) - - for i := 0; i < b.N; i++ { - var lo *options.LoggerOptionsBuilder - opts[i] = lo + opts[i] = options.Find().SetComment(str).SetHint("y").SetMin(1).SetMax(2) } b.ReportAllocs() for i := 0; i < b.N; i++ { - _, _ = NewOptions[options.LoggerOptions](opts...) + _, _ = NewOptions[options.FindOptions](opts...) 
} }) } diff --git a/internal/test/goleak/go.mod b/internal/test/goleak/go.mod new file mode 100644 index 0000000000..3f1fe31ac0 --- /dev/null +++ b/internal/test/goleak/go.mod @@ -0,0 +1,29 @@ +module go.mongodb.go/mongo-driver/internal/test/goleak + +go 1.22 + +replace go.mongodb.org/mongo-driver => ../../../ + +require ( + github.com/stretchr/testify v1.9.0 + go.mongodb.org/mongo-driver/v2 v2.0.0-beta2 + go.uber.org/goleak v1.3.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/kr/pretty v0.3.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/text v0.19.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/internal/test/goleak/go.sum b/internal/test/goleak/go.sum new file mode 100644 index 0000000000..9d10adb48a --- /dev/null +++ b/internal/test/goleak/go.sum @@ -0,0 +1,74 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 
+github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver/v2 v2.0.0-beta2 h1:PRtbRKwblE8ZfI8qOhofcjn9y8CmKZI7trS5vDMeJX0= +go.mongodb.org/mongo-driver/v2 v2.0.0-beta2/go.mod h1:UGLb3ZgEzaY0cCbJpH9UFt9B6gEXiTPzsnJS38nBeoU= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text 
v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/test/goleak/goleak_test.go b/internal/test/goleak/goleak_test.go new file mode 100644 index 0000000000..033668a338 --- /dev/null +++ b/internal/test/goleak/goleak_test.go @@ -0,0 +1,122 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package main + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.mongodb.org/mongo-driver/v2/bson" + "go.mongodb.org/mongo-driver/v2/mongo" + "go.mongodb.org/mongo-driver/v2/mongo/options" + "go.uber.org/goleak" +) + +var dbName = fmt.Sprintf("goleak-%d", time.Now().Unix()) + +// TestGoroutineLeak creates clients with various client configurations, runs +// some operations with each one, then disconnects the client. It asserts that +// no goroutines were leaked after the client is disconnected. +func TestGoroutineLeak(t *testing.T) { + testCases := []struct { + desc string + opts *options.ClientOptions + }{ + { + desc: "base", + opts: options.Client(), + }, + { + desc: "compressors=snappy", + opts: options.Client().SetCompressors([]string{"snappy"}), + }, + { + desc: "compressors=zlib", + opts: options.Client().SetCompressors([]string{"zlib"}), + }, + { + desc: "compressors=zstd", + opts: options.Client().SetCompressors([]string{"zstd"}), + }, + { + desc: "minPoolSize=10", + opts: options.Client().SetMinPoolSize(10), + }, + { + desc: "serverMonitoringMode=poll", + opts: options.Client().SetServerMonitoringMode(options.ServerMonitoringModePoll), + }, + } + + for _, tc := range testCases { + // These can't be run in parallel because goleak currently can't filter + // out goroutines from other parallel subtests. 
+ t.Run(tc.desc, func(t *testing.T) { + defer goleak.VerifyNone(t) + + base := options.Client() + if u := os.Getenv("MONGODB_URI"); u != "" { + base.ApplyURI(u) + } + client, err := mongo.Connect(base, tc.opts) + require.NoError(t, err) + + defer func() { + err = client.Disconnect(context.Background()) + require.NoError(t, err) + }() + + db := client.Database(dbName) + defer func() { + err := db.Drop(context.Background()) + require.NoError(t, err) + }() + + coll := db.Collection(collectionName(t)) + + // Start a change stream to simulate a change listener workload. + cs, err := coll.Watch(context.Background(), mongo.Pipeline{}) + require.NoError(t, err) + defer cs.Close(context.Background()) + + // Run some Insert and FindOne operations to simulate a writing and + // reading workload. Run 50 iterations to increase the probability + // that a goroutine leak will happen if a problem exists. + for i := 0; i < 50; i++ { + _, err = coll.InsertOne(context.Background(), bson.M{"x": 123}) + require.NoError(t, err) + + var res bson.D + err = coll.FindOne(context.Background(), bson.D{}).Decode(&res) + require.NoError(t, err) + } + + // Intentionally cause some timeouts. Ignore any errors. + for i := 0; i < 50; i++ { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Microsecond) + coll.FindOne(ctx, bson.D{}).Err() + cancel() + } + + // Finish simulating the change listener workload. Use "Next" to + // fetch at least one change stream document batch and decode the + // first document. + cs.Next(context.Background()) + var res bson.D + err = cs.Decode(&res) + require.NoError(t, err) + }) + } +} + +func collectionName(t *testing.T) string { + return fmt.Sprintf("%s-%d", t.Name(), time.Now().Unix()) +} diff --git a/mongo/client.go b/mongo/client.go index ac0977190b..7c308f722a 100644 --- a/mongo/client.go +++ b/mongo/client.go @@ -107,7 +107,7 @@ type Client struct { // // The Client.Ping method can be used to verify that the deployment is successfully connected and the // Client was correctly configured. -func Connect(opts ...options.Lister[options.ClientOptions]) (*Client, error) { +func Connect(opts ...*options.ClientOptions) (*Client, error) { c, err := newClient(opts...) if err != nil { return nil, err @@ -132,11 +132,8 @@ func Connect(opts ...options.Lister[options.ClientOptions]) (*Client, error) { // option fields of previous options, there is no partial overwriting. For example, if Username is // set in the Auth field for the first option, and Password is set for the second but with no // Username, after the merge the Username field will be empty. -func newClient(opts ...options.Lister[options.ClientOptions]) (*Client, error) { - args, err := mongoutil.NewOptions(opts...) - if err != nil { - return nil, err - } +func newClient(opts ...*options.ClientOptions) (*Client, error) { + clientOpts := options.MergeClientOptions(opts...) 
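The whole-field overwrite behavior described in the newClient doc comment above is a property of MergeClientOptions; a small sketch of the Username/Password case that comment mentions:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/v2/mongo/options"
)

func main() {
	first := options.Client().SetAuth(options.Credential{Username: "alice"})
	second := options.Client().SetAuth(options.Credential{Password: "secret"})

	// Later options replace entire fields; there is no partial merging, so
	// the Username from the first option is lost after the merge.
	merged := options.MergeClientOptions(first, second)

	fmt.Printf("username=%q password=%q\n", merged.Auth.Username, merged.Auth.Password)
	// username="" password="secret"
}
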
id, err := uuid.New() if err != nil { @@ -149,92 +146,92 @@ func newClient(opts ...options.Lister[options.ClientOptions]) (*Client, error) { // LocalThreshold client.localThreshold = defaultLocalThreshold - if args.LocalThreshold != nil { - client.localThreshold = *args.LocalThreshold + if clientOpts.LocalThreshold != nil { + client.localThreshold = *clientOpts.LocalThreshold } // Monitor - if args.Monitor != nil { - client.monitor = args.Monitor + if clientOpts.Monitor != nil { + client.monitor = clientOpts.Monitor } // ServerMonitor - if args.ServerMonitor != nil { - client.serverMonitor = args.ServerMonitor + if clientOpts.ServerMonitor != nil { + client.serverMonitor = clientOpts.ServerMonitor } // ReadConcern client.readConcern = &readconcern.ReadConcern{} - if args.ReadConcern != nil { - client.readConcern = args.ReadConcern + if clientOpts.ReadConcern != nil { + client.readConcern = clientOpts.ReadConcern } // ReadPreference client.readPreference = readpref.Primary() - if args.ReadPreference != nil { - client.readPreference = args.ReadPreference + if clientOpts.ReadPreference != nil { + client.readPreference = clientOpts.ReadPreference } // BSONOptions - if args.BSONOptions != nil { - client.bsonOpts = args.BSONOptions + if clientOpts.BSONOptions != nil { + client.bsonOpts = clientOpts.BSONOptions } // Registry client.registry = defaultRegistry - if args.Registry != nil { - client.registry = args.Registry + if clientOpts.Registry != nil { + client.registry = clientOpts.Registry } // RetryWrites client.retryWrites = true // retry writes on by default - if args.RetryWrites != nil { - client.retryWrites = *args.RetryWrites + if clientOpts.RetryWrites != nil { + client.retryWrites = *clientOpts.RetryWrites } client.retryReads = true - if args.RetryReads != nil { - client.retryReads = *args.RetryReads + if clientOpts.RetryReads != nil { + client.retryReads = *clientOpts.RetryReads } // Timeout - client.timeout = args.Timeout - client.httpClient = args.HTTPClient + client.timeout = clientOpts.Timeout + client.httpClient = clientOpts.HTTPClient // WriteConcern - if args.WriteConcern != nil { - client.writeConcern = args.WriteConcern + if clientOpts.WriteConcern != nil { + client.writeConcern = clientOpts.WriteConcern } // AutoEncryptionOptions - if args.AutoEncryptionOptions != nil { + if clientOpts.AutoEncryptionOptions != nil { client.isAutoEncryptionSet = true - if err := client.configureAutoEncryption(args); err != nil { + if err := client.configureAutoEncryption(clientOpts); err != nil { return nil, err } } else { - client.cryptFLE = args.Crypt + client.cryptFLE = clientOpts.Crypt } // Deployment - if args.Deployment != nil { - client.deployment = args.Deployment + if clientOpts.Deployment != nil { + client.deployment = clientOpts.Deployment } // Set default options - if args.MaxPoolSize == nil { + if clientOpts.MaxPoolSize == nil { defaultMaxPoolSize := uint64(defaultMaxPoolSize) - args.MaxPoolSize = &defaultMaxPoolSize + clientOpts.MaxPoolSize = &defaultMaxPoolSize } - if args.Auth != nil { + if clientOpts.Auth != nil { client.authenticator, err = auth.CreateAuthenticator( - args.Auth.AuthMechanism, - topology.ConvertCreds(args.Auth), - args.HTTPClient, + clientOpts.Auth.AuthMechanism, + topology.ConvertCreds(clientOpts.Auth), + clientOpts.HTTPClient, ) if err != nil { return nil, fmt.Errorf("error creating authenticator: %w", err) } } - cfg, err := topology.NewConfigFromOptionsWithAuthenticator(args, client.clock, client.authenticator) + cfg, err := 
topology.NewConfigFromOptionsWithAuthenticator(clientOpts, client.clock, client.authenticator) if err != nil { return nil, err } var connectTimeout time.Duration - if args.ConnectTimeout != nil { - connectTimeout = *args.ConnectTimeout + if clientOpts.ConnectTimeout != nil { + connectTimeout = *clientOpts.ConnectTimeout } client.serverAPI = topology.ServerAPIFromServerOptions(connectTimeout, cfg.ServerOpts) @@ -247,7 +244,7 @@ func newClient(opts ...options.Lister[options.ClientOptions]) (*Client, error) { } // Create a logger for the client. - client.logger, err = newLogger(args.LoggerOptions) + client.logger, err = newLogger(clientOpts.LoggerOptions) if err != nil { return nil, fmt.Errorf("invalid logger options: %w", err) } @@ -472,15 +469,11 @@ func (c *Client) endSessions(ctx context.Context) { } func (c *Client) configureAutoEncryption(args *options.ClientOptions) error { - aeArgs, err := mongoutil.NewOptions[options.AutoEncryptionOptions](args.AutoEncryptionOptions) - if err != nil { - return fmt.Errorf("failed to construct options from builder: %w", err) - } - - c.encryptedFieldsMap = aeArgs.EncryptedFieldsMap + c.encryptedFieldsMap = args.AutoEncryptionOptions.EncryptedFieldsMap if err := c.configureKeyVaultClientFLE(args); err != nil { return err } + if err := c.configureMetadataClientFLE(args); err != nil { return err } @@ -513,68 +506,57 @@ func (c *Client) getOrCreateInternalClient(args *options.ClientOptions) (*Client argsCopy.AutoEncryptionOptions = nil argsCopy.MinPoolSize = ptrutil.Ptr[uint64](0) - opts := mongoutil.NewOptionsLister(&argsCopy, nil) - var err error - c.internalClientFLE, err = newClient(opts) + c.internalClientFLE, err = newClient(&argsCopy) return c.internalClientFLE, err } -func (c *Client) configureKeyVaultClientFLE(clientArgs *options.ClientOptions) error { - // parse key vault options and create new key vault client - aeArgs, err := mongoutil.NewOptions[options.AutoEncryptionOptions](clientArgs.AutoEncryptionOptions) - if err != nil { - return fmt.Errorf("failed to construct options from builder: %w", err) - } +func (c *Client) configureKeyVaultClientFLE(clientOpts *options.ClientOptions) error { + aeOpts := clientOpts.AutoEncryptionOptions + + var err error switch { - case aeArgs.KeyVaultClientOptions != nil: - c.keyVaultClientFLE, err = newClient(aeArgs.KeyVaultClientOptions) - case clientArgs.MaxPoolSize != nil && *clientArgs.MaxPoolSize == 0: + case aeOpts.KeyVaultClientOptions != nil: + c.keyVaultClientFLE, err = newClient(aeOpts.KeyVaultClientOptions) + case clientOpts.MaxPoolSize != nil && *clientOpts.MaxPoolSize == 0: c.keyVaultClientFLE = c default: - c.keyVaultClientFLE, err = c.getOrCreateInternalClient(clientArgs) + c.keyVaultClientFLE, err = c.getOrCreateInternalClient(clientOpts) } if err != nil { return err } - dbName, collName := splitNamespace(aeArgs.KeyVaultNamespace) + dbName, collName := splitNamespace(aeOpts.KeyVaultNamespace) c.keyVaultCollFLE = c.keyVaultClientFLE.Database(dbName).Collection(collName, keyVaultCollOpts) return nil } -func (c *Client) configureMetadataClientFLE(clientArgs *options.ClientOptions) error { - // parse key vault options and create new key vault client - aeArgs, err := mongoutil.NewOptions[options.AutoEncryptionOptions](clientArgs.AutoEncryptionOptions) - if err != nil { - return fmt.Errorf("failed to construct options from builder: %w", err) - } +func (c *Client) configureMetadataClientFLE(clientOpts *options.ClientOptions) error { + aeOpts := clientOpts.AutoEncryptionOptions - if 
aeArgs.BypassAutoEncryption != nil && *aeArgs.BypassAutoEncryption { + if aeOpts.BypassAutoEncryption != nil && *aeOpts.BypassAutoEncryption { // no need for a metadata client. return nil } - if clientArgs.MaxPoolSize != nil && *clientArgs.MaxPoolSize == 0 { + if clientOpts.MaxPoolSize != nil && *clientOpts.MaxPoolSize == 0 { c.metadataClientFLE = c return nil } - c.metadataClientFLE, err = c.getOrCreateInternalClient(clientArgs) + var err error + c.metadataClientFLE, err = c.getOrCreateInternalClient(clientOpts) + return err } -func (c *Client) newMongoCrypt(opts options.Lister[options.AutoEncryptionOptions]) (*mongocrypt.MongoCrypt, error) { - args, err := mongoutil.NewOptions[options.AutoEncryptionOptions](opts) - if err != nil { - return nil, fmt.Errorf("failed to construct options from builder: %w", err) - } - +func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt.MongoCrypt, error) { // convert schemas in SchemaMap to bsoncore documents cryptSchemaMap := make(map[string]bsoncore.Document) - for k, v := range args.SchemaMap { + for k, v := range opts.SchemaMap { schema, err := marshal(v, c.bsonOpts, c.registry) if err != nil { return nil, err @@ -584,7 +566,7 @@ func (c *Client) newMongoCrypt(opts options.Lister[options.AutoEncryptionOptions // convert schemas in EncryptedFieldsMap to bsoncore documents cryptEncryptedFieldsMap := make(map[string]bsoncore.Document) - for k, v := range args.EncryptedFieldsMap { + for k, v := range opts.EncryptedFieldsMap { encryptedFields, err := marshal(v, c.bsonOpts, c.registry) if err != nil { return nil, err @@ -592,7 +574,7 @@ func (c *Client) newMongoCrypt(opts options.Lister[options.AutoEncryptionOptions cryptEncryptedFieldsMap[k] = encryptedFields } - kmsProviders, err := marshal(args.KmsProviders, c.bsonOpts, c.registry) + kmsProviders, err := marshal(opts.KmsProviders, c.bsonOpts, c.registry) if err != nil { return nil, fmt.Errorf("error creating KMS providers document: %w", err) } @@ -600,7 +582,7 @@ func (c *Client) newMongoCrypt(opts options.Lister[options.AutoEncryptionOptions // Set the crypt_shared library override path from the "cryptSharedLibPath" extra option if one // was set. cryptSharedLibPath := "" - if val, ok := args.ExtraOptions["cryptSharedLibPath"]; ok { + if val, ok := opts.ExtraOptions["cryptSharedLibPath"]; ok { str, ok := val.(string) if !ok { return nil, fmt.Errorf( @@ -613,12 +595,12 @@ func (c *Client) newMongoCrypt(opts options.Lister[options.AutoEncryptionOptions // intended for use from tests; there is no supported public API for explicitly disabling // loading the crypt_shared library. cryptSharedLibDisabled := false - if v, ok := args.ExtraOptions["__cryptSharedLibDisabledForTestOnly"]; ok { + if v, ok := opts.ExtraOptions["__cryptSharedLibDisabledForTestOnly"]; ok { cryptSharedLibDisabled = v.(bool) } - bypassAutoEncryption := args.BypassAutoEncryption != nil && *args.BypassAutoEncryption - bypassQueryAnalysis := args.BypassQueryAnalysis != nil && *args.BypassQueryAnalysis + bypassAutoEncryption := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). SetKmsProviders(kmsProviders). @@ -627,13 +609,13 @@ func (c *Client) newMongoCrypt(opts options.Lister[options.AutoEncryptionOptions SetEncryptedFieldsMap(cryptEncryptedFieldsMap). SetCryptSharedLibDisabled(cryptSharedLibDisabled || bypassAutoEncryption). 
SetCryptSharedLibOverridePath(cryptSharedLibPath). - SetHTTPClient(args.HTTPClient)) + SetHTTPClient(opts.HTTPClient)) if err != nil { return nil, err } var cryptSharedLibRequired bool - if val, ok := args.ExtraOptions["cryptSharedLibRequired"]; ok { + if val, ok := opts.ExtraOptions["cryptSharedLibRequired"]; ok { b, ok := val.(bool) if !ok { return nil, fmt.Errorf( @@ -654,10 +636,8 @@ func (c *Client) newMongoCrypt(opts options.Lister[options.AutoEncryptionOptions } //nolint:unused // the unused linter thinks that this function is unreachable because "c.newMongoCrypt" always panics without the "cse" build tag set. -func (c *Client) configureCryptFLE(mc *mongocrypt.MongoCrypt, opts options.Lister[options.AutoEncryptionOptions]) { - args, _ := mongoutil.NewOptions[options.AutoEncryptionOptions](opts) - - bypass := args.BypassAutoEncryption != nil && *args.BypassAutoEncryption +func (c *Client) configureCryptFLE(mc *mongocrypt.MongoCrypt, opts *options.AutoEncryptionOptions) { + bypass := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption kr := keyRetriever{coll: c.keyVaultCollFLE} var cir collInfoRetriever // If bypass is true, c.metadataClientFLE is nil and the collInfoRetriever @@ -672,7 +652,7 @@ func (c *Client) configureCryptFLE(mc *mongocrypt.MongoCrypt, opts options.Liste CollInfoFn: cir.cryptCollInfo, KeyFn: kr.cryptKeys, MarkFn: c.mongocryptdFLE.markCommand, - TLSConfig: args.TLSConfig, + TLSConfig: opts.TLSConfig, BypassAutoEncryption: bypass, }) } @@ -974,28 +954,23 @@ func (c *Client) BulkWrite(ctx context.Context, models *ClientWriteModels, // newLogger will use the LoggerOptions to create an internal logger and publish // messages using a LogSink. -func newLogger(opts options.Lister[options.LoggerOptions]) (*logger.Logger, error) { +func newLogger(opts *options.LoggerOptions) (*logger.Logger, error) { // If there are no logger options, then create a default logger. if opts == nil { opts = options.Logger() } - args, err := mongoutil.NewOptions[options.LoggerOptions](opts) - if err != nil { - return nil, fmt.Errorf("failed to construct options from builder: %w", err) - } - // If there are no component-level options and the environment does not // contain component variables, then do nothing. - if len(args.ComponentLevels) == 0 && !logger.EnvHasComponentVariables() { + if len(opts.ComponentLevels) == 0 && !logger.EnvHasComponentVariables() { return nil, nil } // Otherwise, collect the component-level options and create a logger. 
componentLevels := make(map[logger.Component]logger.Level) - for component, level := range args.ComponentLevels { + for component, level := range opts.ComponentLevels { componentLevels[logger.Component(component)] = logger.Level(level) } - return logger.New(args.Sink, args.MaxDocumentLength, componentLevels) + return logger.New(opts.Sink, opts.MaxDocumentLength, componentLevels) } diff --git a/mongo/client_test.go b/mongo/client_test.go index e1226610d8..8d0c4245dc 100644 --- a/mongo/client_test.go +++ b/mongo/client_test.go @@ -19,7 +19,6 @@ import ( "go.mongodb.org/mongo-driver/v2/event" "go.mongodb.org/mongo-driver/v2/internal/assert" "go.mongodb.org/mongo-driver/v2/internal/integtest" - "go.mongodb.org/mongo-driver/v2/internal/mongoutil" "go.mongodb.org/mongo-driver/v2/internal/require" "go.mongodb.org/mongo-driver/v2/mongo/options" "go.mongodb.org/mongo-driver/v2/mongo/readconcern" @@ -32,7 +31,7 @@ import ( var bgCtx = context.Background() -func setupClient(opts ...options.Lister[options.ClientOptions]) *Client { +func setupClient(opts ...*options.ClientOptions) *Client { if len(opts) == 0 { clientOpts := options.Client().ApplyURI("mongodb://localhost:27017") integtest.AddTestServerAPIVersion(clientOpts) @@ -129,7 +128,7 @@ func TestClient(t *testing.T) { t.Run("localThreshold", func(t *testing.T) { testCases := []struct { name string - opts *options.ClientOptionsBuilder + opts *options.ClientOptions expectedThreshold time.Duration }{ {"default", options.Client(), defaultLocalThreshold}, @@ -151,7 +150,7 @@ func TestClient(t *testing.T) { t.Run("min pool size from Set*PoolSize()", func(t *testing.T) { testCases := []struct { name string - opts *options.ClientOptionsBuilder + opts *options.ClientOptions err error }{ { @@ -195,7 +194,7 @@ func TestClient(t *testing.T) { t.Run("min pool size from ApplyURI()", func(t *testing.T) { testCases := []struct { name string - opts *options.ClientOptionsBuilder + opts *options.ClientOptions err error }{ { @@ -242,7 +241,7 @@ func TestClient(t *testing.T) { testCases := []struct { name string - opts *options.ClientOptionsBuilder + opts *options.ClientOptions expectErr bool expectedRetry bool }{ @@ -270,7 +269,7 @@ func TestClient(t *testing.T) { testCases := []struct { name string - opts *options.ClientOptionsBuilder + opts *options.ClientOptions expectErr bool expectedRetry bool }{ @@ -318,8 +317,7 @@ func TestClient(t *testing.T) { uri := "mongodb://localhost:27017/foobar" opts := options.Client().ApplyURI(uri) - args, _ := mongoutil.NewOptions[options.ClientOptions](opts) - got := args.GetURI() + got := opts.GetURI() assert.Equal(t, uri, got, "expected GetURI to return %v, got %v", uri, got) }) @@ -416,7 +414,7 @@ func TestClient(t *testing.T) { } }) t.Run("serverAPI version", func(t *testing.T) { - getServerAPIOptions := func() *options.ServerAPIOptionsBuilder { + getServerAPIOptions := func() *options.ServerAPIOptions { return options.ServerAPI(options.ServerAPIVersion1). 
SetStrict(false).SetDeprecationErrors(false) } diff --git a/mongo/collection.go b/mongo/collection.go index 4c2cfef4f0..3489f12f4f 100644 --- a/mongo/collection.go +++ b/mongo/collection.go @@ -1281,6 +1281,16 @@ func (coll *Collection) Distinct( } op.Comment(comment) } + if args.Hint != nil { + if isUnorderedMap(args.Hint) { + return &DistinctResult{err: ErrMapForOrderedArgument{"hint"}} + } + hint, err := marshalValue(args.Hint, coll.bsonOpts, coll.registry) + if err != nil { + return &DistinctResult{err: err} + } + op.Hint(hint) + } retry := driver.RetryNone if coll.client.retryReads { retry = driver.RetryOncePerCommand @@ -1455,6 +1465,9 @@ func (coll *Collection) find( if args.NoCursorTimeout != nil { op.NoCursorTimeout(*args.NoCursorTimeout) } + if args.OplogReplay != nil { + op.OplogReplay(*args.OplogReplay) + } if args.Projection != nil { proj, err := marshal(args.Projection, coll.bsonOpts, coll.registry) if err != nil { @@ -1508,6 +1521,7 @@ func newFindArgsFromFindOneArgs(args *options.FindOneOptions) *options.FindOptio v.Hint = args.Hint v.Max = args.Max v.Min = args.Min + v.OplogReplay = args.OplogReplay v.Projection = args.Projection v.ReturnKey = args.ReturnKey v.ShowRecordID = args.ShowRecordID diff --git a/mongo/gridfs_download_stream.go b/mongo/gridfs_download_stream.go index 1cc9bf65fd..29df84cc0a 100644 --- a/mongo/gridfs_download_stream.go +++ b/mongo/gridfs_download_stream.go @@ -52,7 +52,7 @@ type GridFSDownloadStream struct { // GridFSDownloadStream.GetFile method. type GridFSFile struct { // ID is the file's ID. This will match the file ID specified when uploading the file. If an upload helper that - // does not require a file ID was used, this field will be a primitive.ObjectID. + // does not require a file ID was used, this field will be a bson.ObjectID. ID interface{} // Length is the length of this file in bytes. diff --git a/mongo/mongocryptd.go b/mongo/mongocryptd.go index cedc48381c..9e6e9daf11 100644 --- a/mongo/mongocryptd.go +++ b/mongo/mongocryptd.go @@ -8,12 +8,10 @@ package mongo import ( "context" - "fmt" "os/exec" "strings" "time" - "go.mongodb.org/mongo-driver/v2/internal/mongoutil" "go.mongodb.org/mongo-driver/v2/mongo/options" "go.mongodb.org/mongo-driver/v2/mongo/readconcern" "go.mongodb.org/mongo-driver/v2/mongo/readpref" @@ -40,24 +38,19 @@ type mongocryptdClient struct { // newMongocryptdClient creates a client to mongocryptd. // newMongocryptdClient is expected to not be called if the crypt shared library is available. // The crypt shared library replaces all mongocryptd functionality. 
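Stepping back to the Distinct change in collection.go above, the new hint pass-through would be used roughly as below. This is a sketch that assumes the distinct options builder exposes SetHint (as the unified runner uses it) and that DistinctResult.Decode unmarshals the returned array of values.

package example

import (
	"context"

	"go.mongodb.org/mongo-driver/v2/bson"
	"go.mongodb.org/mongo-driver/v2/mongo"
	"go.mongodb.org/mongo-driver/v2/mongo/options"
)

// distinctWithHint sketches supplying an index hint to Distinct. The hint is
// given as a bson.D because map-typed hints are rejected with
// ErrMapForOrderedArgument in the new collection.go code path.
func distinctWithHint(ctx context.Context, coll *mongo.Collection) ([]string, error) {
	filter := bson.D{{Key: "qty", Value: bson.D{{Key: "$gt", Value: 5}}}}

	res := coll.Distinct(ctx, "category", filter,
		options.Distinct().SetHint(bson.D{{Key: "qty", Value: 1}}))

	var categories []string
	err := res.Decode(&categories)

	return categories, err
}
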
-func newMongocryptdClient(opts options.Lister[options.AutoEncryptionOptions]) (*mongocryptdClient, error) { +func newMongocryptdClient(opts *options.AutoEncryptionOptions) (*mongocryptdClient, error) { // create mcryptClient instance and spawn process if necessary var bypassSpawn bool var bypassAutoEncryption bool - args, err := mongoutil.NewOptions[options.AutoEncryptionOptions](opts) - if err != nil { - return nil, fmt.Errorf("failed to construct options from builder: %w", err) - } - - if bypass, ok := args.ExtraOptions["mongocryptdBypassSpawn"]; ok { + if bypass, ok := opts.ExtraOptions["mongocryptdBypassSpawn"]; ok { bypassSpawn = bypass.(bool) } - if args.BypassAutoEncryption != nil { - bypassAutoEncryption = *args.BypassAutoEncryption + if opts.BypassAutoEncryption != nil { + bypassAutoEncryption = *opts.BypassAutoEncryption } - bypassQueryAnalysis := args.BypassQueryAnalysis != nil && *args.BypassQueryAnalysis + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis mc := &mongocryptdClient{ // mongocryptd should not be spawned if any of these conditions are true: @@ -68,7 +61,7 @@ func newMongocryptdClient(opts options.Lister[options.AutoEncryptionOptions]) (* } if !mc.bypassSpawn { - mc.path, mc.spawnArgs = createSpawnArgs(args.ExtraOptions) + mc.path, mc.spawnArgs = createSpawnArgs(opts.ExtraOptions) if err := mc.spawnProcess(); err != nil { return nil, err } @@ -76,7 +69,7 @@ func newMongocryptdClient(opts options.Lister[options.AutoEncryptionOptions]) (* // get connection string uri := defaultURI - if u, ok := args.ExtraOptions["mongocryptdURI"]; ok { + if u, ok := opts.ExtraOptions["mongocryptdURI"]; ok { uri = u.(string) } diff --git a/mongo/ocsp_test.go b/mongo/ocsp_test.go index b180caa2a5..d231b21b6d 100644 --- a/mongo/ocsp_test.go +++ b/mongo/ocsp_test.go @@ -16,7 +16,6 @@ import ( "go.mongodb.org/mongo-driver/v2/internal/assert" "go.mongodb.org/mongo-driver/v2/internal/integtest" - "go.mongodb.org/mongo-driver/v2/internal/mongoutil" "go.mongodb.org/mongo-driver/v2/mongo/options" "go.mongodb.org/mongo-driver/v2/mongo/readpref" ) @@ -57,7 +56,7 @@ func TestOCSP(t *testing.T) { }) } -func createOCSPClientOptions(uri string) *options.ClientOptionsBuilder { +func createOCSPClientOptions(uri string) *options.ClientOptions { opts := options.Client().ApplyURI(uri) timeout := 500 * time.Millisecond @@ -69,14 +68,12 @@ func createOCSPClientOptions(uri string) *options.ClientOptionsBuilder { return opts } -func createInsecureOCSPClientOptions(uri string) *options.ClientOptionsBuilder { +func createInsecureOCSPClientOptions(uri string) *options.ClientOptions { opts := createOCSPClientOptions(uri) - args, _ := mongoutil.NewOptions[options.ClientOptions](opts) - - if args.TLSConfig != nil { - args.TLSConfig.InsecureSkipVerify = true - opts.SetTLSConfig(args.TLSConfig) + if opts.TLSConfig != nil { + opts.TLSConfig.InsecureSkipVerify = true + opts.SetTLSConfig(opts.TLSConfig) return opts } diff --git a/mongo/options/autoencryptionoptions.go b/mongo/options/autoencryptionoptions.go index 180b90e676..c630659c5a 100644 --- a/mongo/options/autoencryptionoptions.go +++ b/mongo/options/autoencryptionoptions.go @@ -9,6 +9,8 @@ package options import ( "crypto/tls" "net/http" + + "go.mongodb.org/mongo-driver/v2/internal/httputil" ) // AutoEncryptionOptions represents arguments used to configure auto encryption/decryption behavior for a mongo.Client @@ -28,7 +30,7 @@ import ( // // See corresponding setter methods for documentation. 
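The ExtraOptions keys read by newMongocryptdClient above are plain map entries; below is a sketch of pointing auto encryption at an already-running mongocryptd instead of spawning one, with value types matching the assertions in that function.

package example

import (
	"go.mongodb.org/mongo-driver/v2/mongo/options"
)

// externalMongocryptd disables spawning and targets an existing mongocryptd.
// "mongocryptdBypassSpawn" must be a bool and "mongocryptdURI" a string, per
// the type assertions in newMongocryptdClient.
func externalMongocryptd() *options.AutoEncryptionOptions {
	return options.AutoEncryption().
		SetKeyVaultNamespace("encryption.__keyVault").
		SetExtraOptions(map[string]interface{}{
			"mongocryptdBypassSpawn": true,
			"mongocryptdURI":         "mongodb://localhost:27020",
		})
}
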
type AutoEncryptionOptions struct { - KeyVaultClientOptions Lister[ClientOptions] + KeyVaultClientOptions *ClientOptions KeyVaultNamespace string KmsProviders map[string]map[string]interface{} SchemaMap map[string]interface{} @@ -40,29 +42,11 @@ type AutoEncryptionOptions struct { BypassQueryAnalysis *bool } -// AutoEncryptionOptionsBuilder contains options to configure automatic -// encryption for operations. Each option can be set through setter functions. -// See documentation for each setter function for an explanation of the option. -type AutoEncryptionOptionsBuilder struct { - Opts []func(*AutoEncryptionOptions) error -} - // AutoEncryption creates a new AutoEncryptionOptions configured with default values. -func AutoEncryption() *AutoEncryptionOptionsBuilder { - opts := &AutoEncryptionOptionsBuilder{} - - opts.Opts = append(opts.Opts, func(args *AutoEncryptionOptions) error { - args.HTTPClient = http.DefaultClient - - return nil - }) - - return opts -} - -// List returns a list of AutoEncryptionOptions setter functions. -func (a *AutoEncryptionOptionsBuilder) List() []func(*AutoEncryptionOptions) error { - return a.Opts +func AutoEncryption() *AutoEncryptionOptions { + return &AutoEncryptionOptions{ + HTTPClient: httputil.DefaultHTTPClient, + } } // SetKeyVaultClientOptions specifies options for the client used to communicate with the key vault collection. @@ -74,34 +58,22 @@ func (a *AutoEncryptionOptionsBuilder) List() []func(*AutoEncryptionOptions) err // (and created if necessary). The internal mongo.Client may be shared during automatic encryption (if // BypassAutomaticEncryption is false). The internal mongo.Client is configured with the same options as the target // mongo.Client except minPoolSize is set to 0 and AutoEncryptionOptions is omitted. -func (a *AutoEncryptionOptionsBuilder) SetKeyVaultClientOptions(opts Lister[ClientOptions]) *AutoEncryptionOptionsBuilder { - a.Opts = append(a.Opts, func(args *AutoEncryptionOptions) error { - args.KeyVaultClientOptions = opts - - return nil - }) +func (a *AutoEncryptionOptions) SetKeyVaultClientOptions(opts *ClientOptions) *AutoEncryptionOptions { + a.KeyVaultClientOptions = opts return a } // SetKeyVaultNamespace specifies the namespace of the key vault collection. This is required. -func (a *AutoEncryptionOptionsBuilder) SetKeyVaultNamespace(ns string) *AutoEncryptionOptionsBuilder { - a.Opts = append(a.Opts, func(args *AutoEncryptionOptions) error { - args.KeyVaultNamespace = ns - - return nil - }) +func (a *AutoEncryptionOptions) SetKeyVaultNamespace(ns string) *AutoEncryptionOptions { + a.KeyVaultNamespace = ns return a } // SetKmsProviders specifies options for KMS providers. This is required. -func (a *AutoEncryptionOptionsBuilder) SetKmsProviders(providers map[string]map[string]interface{}) *AutoEncryptionOptionsBuilder { - a.Opts = append(a.Opts, func(args *AutoEncryptionOptions) error { - args.KmsProviders = providers - - return nil - }) +func (a *AutoEncryptionOptions) SetKmsProviders(providers map[string]map[string]interface{}) *AutoEncryptionOptions { + a.KmsProviders = providers return a } @@ -113,12 +85,8 @@ func (a *AutoEncryptionOptionsBuilder) SetKmsProviders(providers map[string]map[ // Supplying a schemaMap provides more security than relying on JSON Schemas obtained from the server. It protects // against a malicious server advertising a false JSON Schema, which could trick the client into sending unencrypted // data that should be encrypted. 
-func (a *AutoEncryptionOptionsBuilder) SetSchemaMap(schemaMap map[string]interface{}) *AutoEncryptionOptionsBuilder { - a.Opts = append(a.Opts, func(args *AutoEncryptionOptions) error { - args.SchemaMap = schemaMap - - return nil - }) +func (a *AutoEncryptionOptions) SetSchemaMap(schemaMap map[string]interface{}) *AutoEncryptionOptions { + a.SchemaMap = schemaMap return a } @@ -131,12 +99,8 @@ func (a *AutoEncryptionOptionsBuilder) SetSchemaMap(schemaMap map[string]interfa // (and created if necessary). The internal mongo.Client may be shared for key vault operations (if KeyVaultClient is // unset). The internal mongo.Client is configured with the same options as the target mongo.Client except minPoolSize // is set to 0 and AutoEncryptionOptions is omitted. -func (a *AutoEncryptionOptionsBuilder) SetBypassAutoEncryption(bypass bool) *AutoEncryptionOptionsBuilder { - a.Opts = append(a.Opts, func(args *AutoEncryptionOptions) error { - args.BypassAutoEncryption = &bypass - - return nil - }) +func (a *AutoEncryptionOptions) SetBypassAutoEncryption(bypass bool) *AutoEncryptionOptions { + a.BypassAutoEncryption = &bypass return a } @@ -170,58 +134,33 @@ func (a *AutoEncryptionOptionsBuilder) SetBypassAutoEncryption(bypass bool) *Aut // absolute path to the directory containing the linked libmongocrypt library. Setting an override // path disables the default system library search path. If an override path is specified but the // crypt_shared library cannot be loaded, Client creation will return an error. Must be a string. -func (a *AutoEncryptionOptionsBuilder) SetExtraOptions(extraOpts map[string]interface{}) *AutoEncryptionOptionsBuilder { - a.Opts = append(a.Opts, func(args *AutoEncryptionOptions) error { - args.ExtraOptions = extraOpts - - return nil - }) +func (a *AutoEncryptionOptions) SetExtraOptions(extraOpts map[string]interface{}) *AutoEncryptionOptions { + a.ExtraOptions = extraOpts return a } // SetTLSConfig specifies tls.Config instances for each KMS provider to use to configure TLS on all connections created // to the KMS provider. -// -// This should only be used to set custom TLS configurations. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12. -func (a *AutoEncryptionOptionsBuilder) SetTLSConfig(tlsOpts map[string]*tls.Config) *AutoEncryptionOptionsBuilder { - a.Opts = append(a.Opts, func(args *AutoEncryptionOptions) error { - tlsConfigs := make(map[string]*tls.Config) - for provider, config := range tlsOpts { - // use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites - if config.MinVersion == 0 { - config.MinVersion = tls.VersionTLS12 - } - tlsConfigs[provider] = config - } - args.TLSConfig = tlsConfigs - - return nil - }) +func (a *AutoEncryptionOptions) SetTLSConfig(cfg map[string]*tls.Config) *AutoEncryptionOptions { + // This should only be used to set custom TLS configurations. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12. + a.TLSConfig = cfg return a } // SetEncryptedFieldsMap specifies a map from namespace to local EncryptedFieldsMap document. // EncryptedFieldsMap is used for Queryable Encryption. 
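// Illustrative sketch of a local encryptedFields map for Queryable Encryption
// as described above; the namespace, field path, and data key ID are
// placeholders (assumes the bson package is imported).
func exampleEncryptedFieldsMap(aeOpts *options.AutoEncryptionOptions, dataKeyID bson.Binary) *options.AutoEncryptionOptions {
	encryptedFields := bson.M{
		"fields": bson.A{
			bson.M{
				"path":     "ssn",
				"bsonType": "string",
				"keyId":    dataKeyID,
				"queries":  bson.M{"queryType": "equality"},
			},
		},
	}
	return aeOpts.SetEncryptedFieldsMap(map[string]interface{}{"hr.employees": encryptedFields})
}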
-func (a *AutoEncryptionOptionsBuilder) SetEncryptedFieldsMap(ef map[string]interface{}) *AutoEncryptionOptionsBuilder {
-	a.Opts = append(a.Opts, func(args *AutoEncryptionOptions) error {
-		args.EncryptedFieldsMap = ef
-
-		return nil
-	})
+func (a *AutoEncryptionOptions) SetEncryptedFieldsMap(ef map[string]interface{}) *AutoEncryptionOptions {
+	a.EncryptedFieldsMap = ef
 
 	return a
 }
 
 // SetBypassQueryAnalysis specifies whether or not query analysis should be used for automatic encryption.
 // Use this option when using explicit encryption with Queryable Encryption.
-func (a *AutoEncryptionOptionsBuilder) SetBypassQueryAnalysis(bypass bool) *AutoEncryptionOptionsBuilder {
-	a.Opts = append(a.Opts, func(args *AutoEncryptionOptions) error {
-		args.BypassQueryAnalysis = &bypass
-
-		return nil
-	})
+func (a *AutoEncryptionOptions) SetBypassQueryAnalysis(bypass bool) *AutoEncryptionOptions {
+	a.BypassQueryAnalysis = &bypass
 
 	return a
 }
diff --git a/mongo/options/clientencryptionoptions.go b/mongo/options/clientencryptionoptions.go
index 2d6d5f0e61..3f9b3745ed 100644
--- a/mongo/options/clientencryptionoptions.go
+++ b/mongo/options/clientencryptionoptions.go
@@ -70,19 +70,13 @@ func (c *ClientEncryptionOptionsBuilder) SetKmsProviders(providers map[string]ma
 // to the KMS provider.
 //
 // This should only be used to set custom TLS configurations. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12.
-func (c *ClientEncryptionOptionsBuilder) SetTLSConfig(tlsOpts map[string]*tls.Config) *ClientEncryptionOptionsBuilder {
+func (c *ClientEncryptionOptionsBuilder) SetTLSConfig(cfg map[string]*tls.Config) *ClientEncryptionOptionsBuilder {
 	c.Opts = append(c.Opts, func(opts *ClientEncryptionOptions) error {
-		tlsConfigs := make(map[string]*tls.Config)
-		for provider, config := range tlsOpts {
-			// use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites
-			if config.MinVersion == 0 {
-				config.MinVersion = tls.VersionTLS12
-			}
-			tlsConfigs[provider] = config
-		}
-		opts.TLSConfig = tlsConfigs
+		opts.TLSConfig = cfg
+
 		return nil
 	})
+
 	return c
 }
diff --git a/mongo/options/clientoptions.go b/mongo/options/clientoptions.go
index bbca8b0ddb..007d235390 100644
--- a/mongo/options/clientoptions.go
+++ b/mongo/options/clientoptions.go
@@ -18,6 +18,7 @@ import (
 	"math"
 	"net"
 	"net/http"
+	"reflect"
 	"strings"
 	"time"
 
@@ -197,11 +198,11 @@ type BSONOptions struct {
 	// BinaryAsSlice causes the driver to unmarshal BSON binary field values
 	// that are the "Generic" or "Old" BSON binary subtype as a Go byte slice
-	// instead of a primitive.Binary.
+	// instead of a bson.Binary.
 	BinaryAsSlice bool
 
 	// DefaultDocumentM causes the driver to always unmarshal documents into the
-	// primitive.M type. This behavior is restricted to data typed as
+	// bson.M type. This behavior is restricted to data typed as
 	// "interface{}" or "map[string]interface{}".
 	DefaultDocumentM bool
 
@@ -223,24 +224,36 @@ type BSONOptions struct {
 	ZeroStructs bool
 }
 
+// DriverInfo appends additional data to the client metadata generated by the
+// driver when handshaking the server. These options do not replace the values
+// used during the handshake; rather, they are appended to the driver-generated
+// data, delimited by "|". This should be used by libraries wrapping the
+// driver, e.g. ODMs.
+type DriverInfo struct {
+	Name    string // Name of the library wrapping the driver.
+	Version string // Version of the library wrapping the driver.
+ Platform string // Platform information for the wrapping driver. +} + // ClientOptions contains arguments to configure a Client instance. Arguments // can be set through the ClientOptions setter functions. See each function for // documentation. type ClientOptions struct { AppName *string Auth *Credential - AutoEncryptionOptions Lister[AutoEncryptionOptions] + AutoEncryptionOptions *AutoEncryptionOptions ConnectTimeout *time.Duration Compressors []string Dialer ContextDialer Direct *bool DisableOCSPEndpointCheck *bool + DriverInfo *DriverInfo HeartbeatInterval *time.Duration Hosts []string HTTPClient *http.Client LoadBalanced *bool LocalThreshold *time.Duration - LoggerOptions Lister[LoggerOptions] + LoggerOptions *LoggerOptions MaxConnIdleTime *time.Duration MaxPoolSize *uint64 MinPoolSize *uint64 @@ -255,7 +268,7 @@ type ClientOptions struct { ReplicaSet *string RetryReads *bool RetryWrites *bool - ServerAPIOptions Lister[ServerAPIOptions] + ServerAPIOptions *ServerAPIOptions ServerMonitoringMode *string ServerSelectionTimeout *time.Duration SRVMaxHosts *int @@ -280,38 +293,17 @@ type ClientOptions struct { Deployment driver.Deployment connString *connstring.ConnString -} - -// ClientOptionsBuilder contains options to configure a Client instance. Each -// option can be set through setter functions. See documentation for each setter -// function for an explanation of the option. -type ClientOptionsBuilder struct { - Opts []func(*ClientOptions) error + err error } // Client creates a new ClientOptions instance. -func Client() *ClientOptionsBuilder { - opts := &ClientOptionsBuilder{} +func Client() *ClientOptions { + opts := &ClientOptions{} opts = opts.SetHTTPClient(httputil.DefaultHTTPClient) return opts } -// List returns a list of ClientOptions setter functions. -func (c *ClientOptionsBuilder) List() []func(*ClientOptions) error { - return c.Opts -} - -// GetURI returns the original URI used to configure the ClientOptions instance. -// If ApplyURI was not called during construction, this returns "". -func (opts *ClientOptions) GetURI() string { - if opts.connString == nil { - return "" - } - - return opts.connString.Original -} - func setURIOpts(uri string, opts *ClientOptions) error { connString, err := connstring.ParseAndValidate(uri) if err != nil { @@ -517,11 +509,9 @@ func setURIOpts(uri string, opts *ClientOptions) error { // GetURI returns the original URI used to configure the ClientOptions instance. // If ApplyURI was not called during construction, this returns "". -func (c *ClientOptionsBuilder) GetURI() string { - args, _ := getOptions[ClientOptions](c) - - if args != nil && args.connString != nil { - return args.connString.Original +func (c *ClientOptions) GetURI() string { + if c != nil && c.connString != nil { + return c.connString.Original } return "" @@ -529,102 +519,96 @@ func (c *ClientOptionsBuilder) GetURI() string { // Validate validates the client options. This method will return the first // error found. -func (c *ClientOptionsBuilder) Validate() error { - args, err := getOptions[ClientOptions](c) - if err != nil { - return err +func (c *ClientOptions) Validate() error { + if c.err != nil { + return c.err } // Direct connections cannot be made if multiple hosts are specified or an SRV // URI is used. 
- if args.Direct != nil && *args.Direct { - if len(args.Hosts) > 1 { + if c.Direct != nil && *c.Direct { + if len(c.Hosts) > 1 { return errors.New("a direct connection cannot be made if multiple hosts are specified") } - if args.connString != nil && args.connString.Scheme == connstring.SchemeMongoDBSRV { + if c.connString != nil && c.connString.Scheme == connstring.SchemeMongoDBSRV { return errors.New("a direct connection cannot be made if an SRV URI is used") } } - if args.HeartbeatInterval != nil && *args.HeartbeatInterval < (500*time.Millisecond) { + if c.HeartbeatInterval != nil && *c.HeartbeatInterval < (500*time.Millisecond) { return fmt.Errorf("heartbeatFrequencyMS must exceed the minimum heartbeat interval of 500ms, got heartbeatFrequencyMS=%q", - *args.HeartbeatInterval) + *c.HeartbeatInterval) } - if args.MaxPoolSize != nil && args.MinPoolSize != nil && *args.MaxPoolSize != 0 && - *args.MinPoolSize > *args.MaxPoolSize { + if c.MaxPoolSize != nil && c.MinPoolSize != nil && *c.MaxPoolSize != 0 && + *c.MinPoolSize > *c.MaxPoolSize { return fmt.Errorf("minPoolSize must be less than or equal to maxPoolSize, got minPoolSize=%d maxPoolSize=%d", - *args.MinPoolSize, *args.MaxPoolSize) + *c.MinPoolSize, *c.MaxPoolSize) } // verify server API version if ServerAPIOptions are passed in. - if args.ServerAPIOptions != nil { - serverAPIopts, err := getOptions[ServerAPIOptions](args.ServerAPIOptions) - if err != nil { - return fmt.Errorf("failed to construct options from builder: %w", err) - } - - if err := serverAPIopts.ServerAPIVersion.Validate(); err != nil { + if c.ServerAPIOptions != nil { + if err := c.ServerAPIOptions.ServerAPIVersion.Validate(); err != nil { return err } } // Validation for load-balanced mode. - if args.LoadBalanced != nil && *args.LoadBalanced { - if len(args.Hosts) > 1 { + if c.LoadBalanced != nil && *c.LoadBalanced { + if len(c.Hosts) > 1 { return connstring.ErrLoadBalancedWithMultipleHosts } - if args.ReplicaSet != nil { + if c.ReplicaSet != nil { return connstring.ErrLoadBalancedWithReplicaSet } - if args.Direct != nil && *args.Direct { + if c.Direct != nil && *c.Direct { return connstring.ErrLoadBalancedWithDirectConnection } } // Validation for srvMaxHosts. 
- if args.SRVMaxHosts != nil && *args.SRVMaxHosts > 0 { - if args.ReplicaSet != nil { + if c.SRVMaxHosts != nil && *c.SRVMaxHosts > 0 { + if c.ReplicaSet != nil { return connstring.ErrSRVMaxHostsWithReplicaSet } - if args.LoadBalanced != nil && *args.LoadBalanced { + if c.LoadBalanced != nil && *c.LoadBalanced { return connstring.ErrSRVMaxHostsWithLoadBalanced } } - if mode := args.ServerMonitoringMode; mode != nil && !connstring.IsValidServerMonitoringMode(*mode) { + if mode := c.ServerMonitoringMode; mode != nil && !connstring.IsValidServerMonitoringMode(*mode) { return fmt.Errorf("invalid server monitoring mode: %q", *mode) } - if to := args.Timeout; to != nil && *to < 0 { + if to := c.Timeout; to != nil && *to < 0 { return fmt.Errorf(`invalid value %q for "Timeout": value must be positive`, *to) } // OIDC Validation - if args.Auth != nil && args.Auth.AuthMechanism == auth.MongoDBOIDC { - if args.Auth.Password != "" { + if c.Auth != nil && c.Auth.AuthMechanism == auth.MongoDBOIDC { + if c.Auth.Password != "" { return fmt.Errorf("password must not be set for the %s auth mechanism", auth.MongoDBOIDC) } - if args.Auth.OIDCMachineCallback != nil && args.Auth.OIDCHumanCallback != nil { + if c.Auth.OIDCMachineCallback != nil && c.Auth.OIDCHumanCallback != nil { return fmt.Errorf("cannot set both OIDCMachineCallback and OIDCHumanCallback, only one may be specified") } - if args.Auth.OIDCHumanCallback == nil && args.Auth.AuthMechanismProperties[auth.AllowedHostsProp] != "" { + if c.Auth.OIDCHumanCallback == nil && c.Auth.AuthMechanismProperties[auth.AllowedHostsProp] != "" { return fmt.Errorf("Cannot specify ALLOWED_HOSTS without an OIDCHumanCallback") } - if env, ok := args.Auth.AuthMechanismProperties[auth.EnvironmentProp]; ok { + if env, ok := c.Auth.AuthMechanismProperties[auth.EnvironmentProp]; ok { switch env { case auth.GCPEnvironmentValue, auth.AzureEnvironmentValue: - if args.Auth.OIDCMachineCallback != nil { + if c.Auth.OIDCMachineCallback != nil { return fmt.Errorf("OIDCMachineCallback cannot be specified with the %s %q", env, auth.EnvironmentProp) } - if args.Auth.OIDCHumanCallback != nil { + if c.Auth.OIDCHumanCallback != nil { return fmt.Errorf("OIDCHumanCallback cannot be specified with the %s %q", env, auth.EnvironmentProp) } - if args.Auth.AuthMechanismProperties[auth.ResourceProp] == "" { + if c.Auth.AuthMechanismProperties[auth.ResourceProp] == "" { return fmt.Errorf("%q must be set for the %s %q", auth.ResourceProp, env, auth.EnvironmentProp) } default: - if args.Auth.AuthMechanismProperties[auth.ResourceProp] != "" { + if c.Auth.AuthMechanismProperties[auth.ResourceProp] != "" { return fmt.Errorf("%q must not be set for the %s %q", auth.ResourceProp, env, auth.EnvironmentProp) } } @@ -648,10 +632,12 @@ func (c *ClientOptionsBuilder) Validate() error { // // For more information about the URI format, see https://www.mongodb.com/docs/manual/reference/connection-string/. See // mongo.Connect documentation for examples of using URIs for different Client configurations. -func (c *ClientOptionsBuilder) ApplyURI(uri string) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - return setURIOpts(uri, opts) - }) +func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { + if c.err != nil { + return c + } + + c.err = setURIOpts(uri, c) return c } @@ -659,12 +645,8 @@ func (c *ClientOptionsBuilder) ApplyURI(uri string) *ClientOptionsBuilder { // SetAppName specifies an application name that is sent to the server when creating new connections. 
It is used by the // server to log connection and profiling information (e.g. slow query logs). This can also be set through the "appName" // URI option (e.g "appName=example_application"). The default is empty, meaning no app name will be sent. -func (c *ClientOptionsBuilder) SetAppName(s string) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.AppName = &s - - return nil - }) +func (c *ClientOptions) SetAppName(s string) *ClientOptions { + c.AppName = &s return c } @@ -672,12 +654,8 @@ func (c *ClientOptionsBuilder) SetAppName(s string) *ClientOptionsBuilder { // SetAuth specifies a Credential containing options for configuring authentication. See the options.Credential // documentation for more information about Credential fields. The default is an empty Credential, meaning no // authentication will be configured. -func (c *ClientOptionsBuilder) SetAuth(auth Credential) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.Auth = &auth - - return nil - }) +func (c *ClientOptions) SetAuth(auth Credential) *ClientOptions { + c.Auth = &auth return c } @@ -698,12 +676,8 @@ func (c *ClientOptionsBuilder) SetAuth(auth Credential) *ClientOptionsBuilder { // // This can also be set through the "compressors" URI option (e.g. "compressors=zstd,zlib,snappy"). The default is // an empty slice, meaning no compression will be enabled. -func (c *ClientOptionsBuilder) SetCompressors(comps []string) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.Compressors = comps - - return nil - }) +func (c *ClientOptions) SetCompressors(comps []string) *ClientOptions { + c.Compressors = comps return c } @@ -711,12 +685,8 @@ func (c *ClientOptionsBuilder) SetCompressors(comps []string) *ClientOptionsBuil // SetConnectTimeout specifies a timeout that is used for creating connections to the server. This can be set through // ApplyURI with the "connectTimeoutMS" (e.g "connectTimeoutMS=30") option. If set to 0, no timeout will be used. The // default is 30 seconds. -func (c *ClientOptionsBuilder) SetConnectTimeout(d time.Duration) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ConnectTimeout = &d - - return nil - }) +func (c *ClientOptions) SetConnectTimeout(d time.Duration) *ClientOptions { + c.ConnectTimeout = &d return c } @@ -724,12 +694,8 @@ func (c *ClientOptionsBuilder) SetConnectTimeout(d time.Duration) *ClientOptions // SetDialer specifies a custom ContextDialer to be used to create new connections to the server. This method overrides // the default net.Dialer, so dialer options such as Timeout, KeepAlive, Resolver, etc can be set. // See https://golang.org/pkg/net/#Dialer for more information about the net.Dialer type. -func (c *ClientOptionsBuilder) SetDialer(d ContextDialer) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.Dialer = d - - return nil - }) +func (c *ClientOptions) SetDialer(d ContextDialer) *ClientOptions { + c.Dialer = d return c } @@ -749,12 +715,8 @@ func (c *ClientOptionsBuilder) SetDialer(d ContextDialer) *ClientOptionsBuilder // If the "connect" and "directConnection" URI options are both specified in the connection string, their values must // not conflict. Direct connections are not valid if multiple hosts are specified or an SRV URI is used. The default // value for this option is false. 
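// Illustrative sketch of the direct-connection rule described above: with
// ClientOptions as a plain struct, the conflict is reported by Validate (or by
// mongo.Connect) rather than at setter time, and fields can be inspected
// directly. The hosts are placeholders.
func exampleDirectValidation() error {
	opts := options.Client().
		SetHosts([]string{"localhost:27017", "localhost:27018"}).
		SetDirect(true)

	// Fields are ordinary struct members now, so they can be read in place.
	_ = opts.Direct != nil && *opts.Direct

	// Returns "a direct connection cannot be made if multiple hosts are specified".
	return opts.Validate()
}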
-func (c *ClientOptionsBuilder) SetDirect(b bool) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.Direct = &b - - return nil - }) +func (c *ClientOptions) SetDirect(b bool) *ClientOptions { + c.Direct = &b return c } @@ -762,12 +724,8 @@ func (c *ClientOptionsBuilder) SetDirect(b bool) *ClientOptionsBuilder { // SetHeartbeatInterval specifies the amount of time to wait between periodic background server checks. This can also be // set through the "heartbeatFrequencyMS" URI option (e.g. "heartbeatFrequencyMS=10000"). The default is 10 seconds. // The minimum is 500ms. -func (c *ClientOptionsBuilder) SetHeartbeatInterval(d time.Duration) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.HeartbeatInterval = &d - - return nil - }) +func (c *ClientOptions) SetHeartbeatInterval(d time.Duration) *ClientOptions { + c.HeartbeatInterval = &d return c } @@ -777,12 +735,8 @@ func (c *ClientOptionsBuilder) SetHeartbeatInterval(d time.Duration) *ClientOpti // // Hosts can also be specified as a comma-separated list in a URI. For example, to include "localhost:27017" and // "localhost:27018", a URI could be "mongodb://localhost:27017,localhost:27018". The default is ["localhost:27017"] -func (c *ClientOptionsBuilder) SetHosts(s []string) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.Hosts = s - - return nil - }) +func (c *ClientOptions) SetHosts(s []string) *ClientOptions { + c.Hosts = s return c } @@ -797,12 +751,8 @@ func (c *ClientOptionsBuilder) SetHosts(s []string) *ClientOptionsBuilder { // 3. The options specify whether or not a direct connection should be made, either via the URI or the SetDirect method. // // The default value is false. -func (c *ClientOptionsBuilder) SetLoadBalanced(lb bool) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.LoadBalanced = &lb - - return nil - }) +func (c *ClientOptions) SetLoadBalanced(lb bool) *ClientOptions { + c.LoadBalanced = &lb return c } @@ -811,24 +761,16 @@ func (c *ClientOptionsBuilder) SetLoadBalanced(lb bool) *ClientOptionsBuilder { // operation, this is the acceptable non-negative delta between shortest and longest average round-trip times. A server // within the latency window is selected randomly. This can also be set through the "localThresholdMS" URI option (e.g. // "localThresholdMS=15000"). The default is 15 milliseconds. -func (c *ClientOptionsBuilder) SetLocalThreshold(d time.Duration) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.LocalThreshold = &d - - return nil - }) +func (c *ClientOptions) SetLocalThreshold(d time.Duration) *ClientOptions { + c.LocalThreshold = &d return c } // SetLoggerOptions specifies a LoggerOptions containing options for // configuring a logger. -func (c *ClientOptionsBuilder) SetLoggerOptions(lopts Lister[LoggerOptions]) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.LoggerOptions = lopts - - return nil - }) +func (c *ClientOptions) SetLoggerOptions(lopts *LoggerOptions) *ClientOptions { + c.LoggerOptions = lopts return c } @@ -836,12 +778,8 @@ func (c *ClientOptionsBuilder) SetLoggerOptions(lopts Lister[LoggerOptions]) *Cl // SetMaxConnIdleTime specifies the maximum amount of time that a connection will remain idle in a connection pool // before it is removed from the pool and closed. This can also be set through the "maxIdleTimeMS" URI option (e.g. 
// "maxIdleTimeMS=10000"). The default is 0, meaning a connection can remain unused indefinitely. -func (c *ClientOptionsBuilder) SetMaxConnIdleTime(d time.Duration) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.MaxConnIdleTime = &d - - return nil - }) +func (c *ClientOptions) SetMaxConnIdleTime(d time.Duration) *ClientOptions { + c.MaxConnIdleTime = &d return c } @@ -849,12 +787,8 @@ func (c *ClientOptionsBuilder) SetMaxConnIdleTime(d time.Duration) *ClientOption // SetMaxPoolSize specifies that maximum number of connections allowed in the driver's connection pool to each server. // Requests to a server will block if this maximum is reached. This can also be set through the "maxPoolSize" URI option // (e.g. "maxPoolSize=100"). If this is 0, maximum connection pool size is not limited. The default is 100. -func (c *ClientOptionsBuilder) SetMaxPoolSize(u uint64) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.MaxPoolSize = &u - - return nil - }) +func (c *ClientOptions) SetMaxPoolSize(u uint64) *ClientOptions { + c.MaxPoolSize = &u return c } @@ -862,12 +796,8 @@ func (c *ClientOptionsBuilder) SetMaxPoolSize(u uint64) *ClientOptionsBuilder { // SetMinPoolSize specifies the minimum number of connections allowed in the driver's connection pool to each server. If // this is non-zero, each server's pool will be maintained in the background to ensure that the size does not fall below // the minimum. This can also be set through the "minPoolSize" URI option (e.g. "minPoolSize=100"). The default is 0. -func (c *ClientOptionsBuilder) SetMinPoolSize(u uint64) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.MinPoolSize = &u - - return nil - }) +func (c *ClientOptions) SetMinPoolSize(u uint64) *ClientOptions { + c.MinPoolSize = &u return c } @@ -875,47 +805,31 @@ func (c *ClientOptionsBuilder) SetMinPoolSize(u uint64) *ClientOptionsBuilder { // SetMaxConnecting specifies the maximum number of connections a connection pool may establish simultaneously. This can // also be set through the "maxConnecting" URI option (e.g. "maxConnecting=2"). If this is 0, the default is used. The // default is 2. Values greater than 100 are not recommended. -func (c *ClientOptionsBuilder) SetMaxConnecting(u uint64) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.MaxConnecting = &u - - return nil - }) +func (c *ClientOptions) SetMaxConnecting(u uint64) *ClientOptions { + c.MaxConnecting = &u return c } // SetPoolMonitor specifies a PoolMonitor to receive connection pool events. See the event.PoolMonitor documentation // for more information about the structure of the monitor and events that can be received. -func (c *ClientOptionsBuilder) SetPoolMonitor(m *event.PoolMonitor) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.PoolMonitor = m - - return nil - }) +func (c *ClientOptions) SetPoolMonitor(m *event.PoolMonitor) *ClientOptions { + c.PoolMonitor = m return c } // SetMonitor specifies a CommandMonitor to receive command events. See the event.CommandMonitor documentation for more // information about the structure of the monitor and events that can be received. 
-func (c *ClientOptionsBuilder) SetMonitor(m *event.CommandMonitor) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.Monitor = m - - return nil - }) +func (c *ClientOptions) SetMonitor(m *event.CommandMonitor) *ClientOptions { + c.Monitor = m return c } // SetServerMonitor specifies an SDAM monitor used to monitor SDAM events. -func (c *ClientOptionsBuilder) SetServerMonitor(m *event.ServerMonitor) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ServerMonitor = m - - return nil - }) +func (c *ClientOptions) SetServerMonitor(m *event.ServerMonitor) *ClientOptions { + c.ServerMonitor = m return c } @@ -923,12 +837,8 @@ func (c *ClientOptionsBuilder) SetServerMonitor(m *event.ServerMonitor) *ClientO // SetReadConcern specifies the read concern to use for read operations. A read concern level can also be set through // the "readConcernLevel" URI option (e.g. "readConcernLevel=majority"). The default is nil, meaning the server will use // its configured default. -func (c *ClientOptionsBuilder) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ReadConcern = rc - - return nil - }) +func (c *ClientOptions) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptions { + c.ReadConcern = rc return c } @@ -946,35 +856,24 @@ func (c *ClientOptionsBuilder) SetReadConcern(rc *readconcern.ReadConcern) *Clie // // The default is readpref.Primary(). See https://www.mongodb.com/docs/manual/core/read-preference/#read-preference for // more information about read preferences. -func (c *ClientOptionsBuilder) SetReadPreference(rp *readpref.ReadPref) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ReadPreference = rp - - return nil - }) +func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions { + c.ReadPreference = rp return c } // SetBSONOptions configures optional BSON marshaling and unmarshaling behavior. -func (c *ClientOptionsBuilder) SetBSONOptions(bopts *BSONOptions) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.BSONOptions = bopts - - return nil - }) +func (c *ClientOptions) SetBSONOptions(bopts *BSONOptions) *ClientOptions { + c.BSONOptions = bopts return c } // SetRegistry specifies the BSON registry to use for BSON marshalling/unmarshalling operations. The default is // bson.NewRegistry(). -func (c *ClientOptionsBuilder) SetRegistry(registry *bson.Registry) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.Registry = registry +func (c *ClientOptions) SetRegistry(registry *bson.Registry) *ClientOptions { + c.Registry = registry - return nil - }) return c } @@ -983,12 +882,8 @@ func (c *ClientOptionsBuilder) SetRegistry(registry *bson.Registry) *ClientOptio // ApplyURI or SetHosts. All nodes in the replica set must have the same replica set name, or they will not be // considered as part of the set by the Client. This can also be set through the "replicaSet" URI option (e.g. // "replicaSet=replset"). The default is empty. 
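// Illustrative sketch of the BSON marshaling behavior described above, using
// only the BSONOptions fields shown in this file; the URI is a placeholder.
func exampleBSONOptions() *options.ClientOptions {
	return options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetBSONOptions(&options.BSONOptions{
			BinaryAsSlice:    true,
			DefaultDocumentM: true,
		})
}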
-func (c *ClientOptionsBuilder) SetReplicaSet(s string) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ReplicaSet = &s - - return nil - }) +func (c *ClientOptions) SetReplicaSet(s string) *ClientOptions { + c.ReplicaSet = &s return c } @@ -1004,12 +899,8 @@ func (c *ClientOptionsBuilder) SetReplicaSet(s string) *ClientOptionsBuilder { // This option requires server version >= 3.6 and a replica set or sharded cluster and will be ignored for any other // cluster type. This can also be set through the "retryWrites" URI option (e.g. "retryWrites=true"). The default is // true. -func (c *ClientOptionsBuilder) SetRetryWrites(b bool) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.RetryWrites = &b - - return nil - }) +func (c *ClientOptions) SetRetryWrites(b bool) *ClientOptions { + c.RetryWrites = &b return c } @@ -1022,12 +913,8 @@ func (c *ClientOptionsBuilder) SetRetryWrites(b bool) *ClientOptionsBuilder { // operations run through RunCommand are not retried. // // This option requires server version >= 3.6 and driver version >= 1.1.0. The default is true. -func (c *ClientOptionsBuilder) SetRetryReads(b bool) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.RetryReads = &b - - return nil - }) +func (c *ClientOptions) SetRetryReads(b bool) *ClientOptions { + c.RetryReads = &b return c } @@ -1035,12 +922,8 @@ func (c *ClientOptionsBuilder) SetRetryReads(b bool) *ClientOptionsBuilder { // SetServerSelectionTimeout specifies how long the driver will wait to find an available, suitable server to execute an // operation. This can also be set through the "serverSelectionTimeoutMS" URI option (e.g. // "serverSelectionTimeoutMS=30000"). The default value is 30 seconds. -func (c *ClientOptionsBuilder) SetServerSelectionTimeout(d time.Duration) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ServerSelectionTimeout = &d - - return nil - }) +func (c *ClientOptions) SetServerSelectionTimeout(d time.Duration) *ClientOptions { + c.ServerSelectionTimeout = &d return c } @@ -1056,12 +939,8 @@ func (c *ClientOptionsBuilder) SetServerSelectionTimeout(d time.Duration) *Clien // If any Timeout is set (even 0) on the Client, the values of MaxTime on // operation options, TransactionOptions.MaxCommitTime and // SessionOptions.DefaultMaxCommitTime will be ignored. -func (c *ClientOptionsBuilder) SetTimeout(d time.Duration) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.Timeout = &d - - return nil - }) +func (c *ClientOptions) SetTimeout(d time.Duration) *ClientOptions { + c.Timeout = &d return c } @@ -1090,12 +969,8 @@ func (c *ClientOptionsBuilder) SetTimeout(d time.Duration) *ClientOptionsBuilder // man-in-the-middle attacks and should only be done for testing. // // The default is nil, meaning no TLS will be enabled. -func (c *ClientOptionsBuilder) SetTLSConfig(cfg *tls.Config) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.TLSConfig = cfg - - return nil - }) +func (c *ClientOptions) SetTLSConfig(cfg *tls.Config) *ClientOptions { + c.TLSConfig = cfg return c } @@ -1103,12 +978,8 @@ func (c *ClientOptionsBuilder) SetTLSConfig(cfg *tls.Config) *ClientOptionsBuild // SetHTTPClient specifies the http.Client to be used for any HTTP requests. // // This should only be used to set custom HTTP client configurations. 
By default, the connection will use an httputil.DefaultHTTPClient. -func (c *ClientOptionsBuilder) SetHTTPClient(client *http.Client) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.HTTPClient = client - - return nil - }) +func (c *ClientOptions) SetHTTPClient(client *http.Client) *ClientOptions { + c.HTTPClient = client return c } @@ -1127,12 +998,8 @@ func (c *ClientOptionsBuilder) SetHTTPClient(client *http.Client) *ClientOptions // returning (e.g. "journal=true"). // // The default is nil, meaning the server will use its configured default. -func (c *ClientOptionsBuilder) SetWriteConcern(wc *writeconcern.WriteConcern) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.WriteConcern = wc - - return nil - }) +func (c *ClientOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *ClientOptions { + c.WriteConcern = wc return c } @@ -1141,12 +1008,8 @@ func (c *ClientOptionsBuilder) SetWriteConcern(wc *writeconcern.WriteConcern) *C // compressor through ApplyURI or SetCompressors. Supported values are -1 through 9, inclusive. -1 tells the zlib // library to use its default, 0 means no compression, 1 means best speed, and 9 means best compression. // This can also be set through the "zlibCompressionLevel" URI option (e.g. "zlibCompressionLevel=-1"). Defaults to -1. -func (c *ClientOptionsBuilder) SetZlibLevel(level int) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ZlibLevel = &level - - return nil - }) +func (c *ClientOptions) SetZlibLevel(level int) *ClientOptions { + c.ZlibLevel = &level return c } @@ -1154,12 +1017,8 @@ func (c *ClientOptionsBuilder) SetZlibLevel(level int) *ClientOptionsBuilder { // SetZstdLevel sets the level for the zstd compressor. This option is ignored if zstd is not specified as a compressor // through ApplyURI or SetCompressors. Supported values are 1 through 20, inclusive. 1 means best speed and 20 means // best compression. This can also be set through the "zstdCompressionLevel" URI option. Defaults to 6. -func (c *ClientOptionsBuilder) SetZstdLevel(level int) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ZstdLevel = &level - - return nil - }) +func (c *ClientOptions) SetZstdLevel(level int) *ClientOptions { + c.ZstdLevel = &level return c } @@ -1167,12 +1026,8 @@ func (c *ClientOptionsBuilder) SetZstdLevel(level int) *ClientOptionsBuilder { // SetAutoEncryptionOptions specifies an AutoEncryptionOptions instance to automatically encrypt and decrypt commands // and their results. See the options.AutoEncryptionOptions documentation for more information about the supported // options. -func (c *ClientOptionsBuilder) SetAutoEncryptionOptions(aeopts Lister[AutoEncryptionOptions]) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.AutoEncryptionOptions = aeopts - - return nil - }) +func (c *ClientOptions) SetAutoEncryptionOptions(aeopts *AutoEncryptionOptions) *ClientOptions { + c.AutoEncryptionOptions = aeopts return c } @@ -1186,12 +1041,8 @@ func (c *ClientOptionsBuilder) SetAutoEncryptionOptions(aeopts Lister[AutoEncryp // // This can also be set through the tlsDisableOCSPEndpointCheck URI option. Both this URI option and tlsInsecure must // not be set at the same time and will error if they are. The default value is false. 
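// Illustrative sketch of the compression settings described above: the zstd
// and zlib levels only take effect when the corresponding compressor is
// enabled via SetCompressors or the URI; the levels shown are the documented
// defaults.
func exampleCompression() *options.ClientOptions {
	return options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetCompressors([]string{"zstd", "zlib"}).
		SetZstdLevel(6).
		SetZlibLevel(-1)
}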
-func (c *ClientOptionsBuilder) SetDisableOCSPEndpointCheck(disableCheck bool) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.DisableOCSPEndpointCheck = &disableCheck - - return nil - }) +func (c *ClientOptions) SetDisableOCSPEndpointCheck(disableCheck bool) *ClientOptions { + c.DisableOCSPEndpointCheck = &disableCheck return c } @@ -1199,12 +1050,8 @@ func (c *ClientOptionsBuilder) SetDisableOCSPEndpointCheck(disableCheck bool) *C // SetServerAPIOptions specifies a ServerAPIOptions instance used to configure the API version sent to the server // when running commands. See the options.ServerAPIOptions documentation for more information about the supported // options. -func (c *ClientOptionsBuilder) SetServerAPIOptions(sopts Lister[ServerAPIOptions]) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ServerAPIOptions = sopts - - return nil - }) +func (c *ClientOptions) SetServerAPIOptions(sopts *ServerAPIOptions) *ClientOptions { + c.ServerAPIOptions = sopts return c } @@ -1213,12 +1060,8 @@ func (c *ClientOptionsBuilder) SetServerAPIOptions(sopts Lister[ServerAPIOptions // the helper constants ServerMonitoringModeAuto, ServerMonitoringModePoll, and // ServerMonitoringModeStream for more information about valid server // monitoring modes. -func (c *ClientOptionsBuilder) SetServerMonitoringMode(mode string) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.ServerMonitoringMode = &mode - - return nil - }) +func (c *ClientOptions) SetServerMonitoringMode(mode string) *ClientOptions { + c.ServerMonitoringMode = &mode return c } @@ -1226,12 +1069,8 @@ func (c *ClientOptionsBuilder) SetServerMonitoringMode(mode string) *ClientOptio // SetSRVMaxHosts specifies the maximum number of SRV results to randomly select during polling. To limit the number // of hosts selected in SRV discovery, this function must be called before ApplyURI. This can also be set through // the "srvMaxHosts" URI option. -func (c *ClientOptionsBuilder) SetSRVMaxHosts(srvMaxHosts int) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.SRVMaxHosts = &srvMaxHosts - - return nil - }) +func (c *ClientOptions) SetSRVMaxHosts(srvMaxHosts int) *ClientOptions { + c.SRVMaxHosts = &srvMaxHosts return c } @@ -1239,12 +1078,17 @@ func (c *ClientOptionsBuilder) SetSRVMaxHosts(srvMaxHosts int) *ClientOptionsBui // SetSRVServiceName specifies a custom SRV service name to use in SRV polling. To use a custom SRV service name // in SRV discovery, this function must be called before ApplyURI. This can also be set through the "srvServiceName" // URI option. -func (c *ClientOptionsBuilder) SetSRVServiceName(srvName string) *ClientOptionsBuilder { - c.Opts = append(c.Opts, func(opts *ClientOptions) error { - opts.SRVServiceName = &srvName +func (c *ClientOptions) SetSRVServiceName(srvName string) *ClientOptions { + c.SRVServiceName = &srvName - return nil - }) + return c +} + +// SetDriverInfo configures optional data to include in the handshake's client +// metadata, delimited by "|" with the driver-generated data. This should be +// used by libraries wrapping the driver, e.g. ODMs. 
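// Illustrative sketch of the DriverInfo option described above, as a wrapping
// library (e.g. an ODM) might use it; the name, version, and platform are
// placeholders.
func exampleDriverInfo() *options.ClientOptions {
	return options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetDriverInfo(&options.DriverInfo{
			Name:     "my-odm",
			Version:  "0.1.0",
			Platform: "go1.22",
		})
}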
+func (c *ClientOptions) SetDriverInfo(info *DriverInfo) *ClientOptions { + c.DriverInfo = info return c } @@ -1414,3 +1258,41 @@ func extractX509UsernameFromSubject(subject string) string { return strings.Join(pairs, ",") } + +// MergeClientOptions combines the given *ClientOptions into a single +// *ClientOptions in a last one wins fashion. The specified options are merged +// with the existing options on the client, with the specified options taking +// precedence. +func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { + if len(opts) == 1 { + return opts[0] + } + + c := Client() + for _, opt := range opts { + if opt == nil { + continue + } + optValue := reflect.ValueOf(opt).Elem() + cValue := reflect.ValueOf(c).Elem() + for i := 0; i < optValue.NumField(); i++ { + field := optValue.Field(i) + fieldType := optValue.Type().Field(i) + // Check if the field is exported and can be set + if field.CanSet() && fieldType.PkgPath == "" && !field.IsZero() { + cValue.Field(i).Set(field) + } + } + + // Manually handle unexported fields + if opt.err != nil { + c.err = opt.err + } + + if opt.connString != nil { + c.connString = opt.connString + } + } + + return c +} diff --git a/mongo/options/clientoptions_test.go b/mongo/options/clientoptions_test.go index 3dc5ca7c45..61bfa7dd0a 100644 --- a/mongo/options/clientoptions_test.go +++ b/mongo/options/clientoptions_test.go @@ -34,8 +34,7 @@ import ( "go.mongodb.org/mongo-driver/v2/x/mongo/driver/connstring" ) -var tClientOptions = reflect.TypeOf(&ClientOptionsBuilder{}) -var tClientopts = reflect.TypeOf(&ClientOptions{}) +var tClientOptions = reflect.TypeOf(&ClientOptions{}) func TestClientOptions(t *testing.T) { t.Run("ApplyURI/doesn't overwrite previous errors", func(t *testing.T) { @@ -57,32 +56,32 @@ func TestClientOptions(t *testing.T) { field string // field to be set dereference bool // Should we compare a pointer or the field }{ - {"AppName", (*ClientOptionsBuilder).SetAppName, "example-application", "AppName", true}, - {"Auth", (*ClientOptionsBuilder).SetAuth, Credential{Username: "foo", Password: "bar"}, "Auth", true}, - {"Compressors", (*ClientOptionsBuilder).SetCompressors, []string{"zstd", "snappy", "zlib"}, "Compressors", true}, - {"ConnectTimeout", (*ClientOptionsBuilder).SetConnectTimeout, 5 * time.Second, "ConnectTimeout", true}, - {"Dialer", (*ClientOptionsBuilder).SetDialer, testDialer{Num: 12345}, "Dialer", true}, - {"HeartbeatInterval", (*ClientOptionsBuilder).SetHeartbeatInterval, 5 * time.Second, "HeartbeatInterval", true}, - {"Hosts", (*ClientOptionsBuilder).SetHosts, []string{"localhost:27017", "localhost:27018", "localhost:27019"}, "Hosts", true}, - {"LocalThreshold", (*ClientOptionsBuilder).SetLocalThreshold, 5 * time.Second, "LocalThreshold", true}, - {"MaxConnIdleTime", (*ClientOptionsBuilder).SetMaxConnIdleTime, 5 * time.Second, "MaxConnIdleTime", true}, - {"MaxPoolSize", (*ClientOptionsBuilder).SetMaxPoolSize, uint64(250), "MaxPoolSize", true}, - {"MinPoolSize", (*ClientOptionsBuilder).SetMinPoolSize, uint64(10), "MinPoolSize", true}, - {"MaxConnecting", (*ClientOptionsBuilder).SetMaxConnecting, uint64(10), "MaxConnecting", true}, - {"PoolMonitor", (*ClientOptionsBuilder).SetPoolMonitor, &event.PoolMonitor{}, "PoolMonitor", false}, - {"Monitor", (*ClientOptionsBuilder).SetMonitor, &event.CommandMonitor{}, "Monitor", false}, - {"ReadConcern", (*ClientOptionsBuilder).SetReadConcern, readconcern.Majority(), "ReadConcern", false}, - {"ReadPreference", (*ClientOptionsBuilder).SetReadPreference, 
readpref.SecondaryPreferred(), "ReadPreference", false}, - {"Registry", (*ClientOptionsBuilder).SetRegistry, bson.NewRegistry(), "Registry", false}, - {"ReplicaSet", (*ClientOptionsBuilder).SetReplicaSet, "example-replicaset", "ReplicaSet", true}, - {"RetryWrites", (*ClientOptionsBuilder).SetRetryWrites, true, "RetryWrites", true}, - {"ServerSelectionTimeout", (*ClientOptionsBuilder).SetServerSelectionTimeout, 5 * time.Second, "ServerSelectionTimeout", true}, - {"Direct", (*ClientOptionsBuilder).SetDirect, true, "Direct", true}, - {"TLSConfig", (*ClientOptionsBuilder).SetTLSConfig, &tls.Config{}, "TLSConfig", false}, - {"WriteConcern", (*ClientOptionsBuilder).SetWriteConcern, writeconcern.Majority(), "WriteConcern", false}, - {"ZlibLevel", (*ClientOptionsBuilder).SetZlibLevel, 6, "ZlibLevel", true}, - {"DisableOCSPEndpointCheck", (*ClientOptionsBuilder).SetDisableOCSPEndpointCheck, true, "DisableOCSPEndpointCheck", true}, - {"LoadBalanced", (*ClientOptionsBuilder).SetLoadBalanced, true, "LoadBalanced", true}, + {"AppName", (*ClientOptions).SetAppName, "example-application", "AppName", true}, + {"Auth", (*ClientOptions).SetAuth, Credential{Username: "foo", Password: "bar"}, "Auth", true}, + {"Compressors", (*ClientOptions).SetCompressors, []string{"zstd", "snappy", "zlib"}, "Compressors", true}, + {"ConnectTimeout", (*ClientOptions).SetConnectTimeout, 5 * time.Second, "ConnectTimeout", true}, + {"Dialer", (*ClientOptions).SetDialer, testDialer{Num: 12345}, "Dialer", true}, + {"HeartbeatInterval", (*ClientOptions).SetHeartbeatInterval, 5 * time.Second, "HeartbeatInterval", true}, + {"Hosts", (*ClientOptions).SetHosts, []string{"localhost:27017", "localhost:27018", "localhost:27019"}, "Hosts", true}, + {"LocalThreshold", (*ClientOptions).SetLocalThreshold, 5 * time.Second, "LocalThreshold", true}, + {"MaxConnIdleTime", (*ClientOptions).SetMaxConnIdleTime, 5 * time.Second, "MaxConnIdleTime", true}, + {"MaxPoolSize", (*ClientOptions).SetMaxPoolSize, uint64(250), "MaxPoolSize", true}, + {"MinPoolSize", (*ClientOptions).SetMinPoolSize, uint64(10), "MinPoolSize", true}, + {"MaxConnecting", (*ClientOptions).SetMaxConnecting, uint64(10), "MaxConnecting", true}, + {"PoolMonitor", (*ClientOptions).SetPoolMonitor, &event.PoolMonitor{}, "PoolMonitor", false}, + {"Monitor", (*ClientOptions).SetMonitor, &event.CommandMonitor{}, "Monitor", false}, + {"ReadConcern", (*ClientOptions).SetReadConcern, readconcern.Majority(), "ReadConcern", false}, + {"ReadPreference", (*ClientOptions).SetReadPreference, readpref.SecondaryPreferred(), "ReadPreference", false}, + {"Registry", (*ClientOptions).SetRegistry, bson.NewRegistry(), "Registry", false}, + {"ReplicaSet", (*ClientOptions).SetReplicaSet, "example-replicaset", "ReplicaSet", true}, + {"RetryWrites", (*ClientOptions).SetRetryWrites, true, "RetryWrites", true}, + {"ServerSelectionTimeout", (*ClientOptions).SetServerSelectionTimeout, 5 * time.Second, "ServerSelectionTimeout", true}, + {"Direct", (*ClientOptions).SetDirect, true, "Direct", true}, + {"TLSConfig", (*ClientOptions).SetTLSConfig, &tls.Config{}, "TLSConfig", false}, + {"WriteConcern", (*ClientOptions).SetWriteConcern, writeconcern.Majority(), "WriteConcern", false}, + {"ZlibLevel", (*ClientOptions).SetZlibLevel, 6, "ZlibLevel", true}, + {"DisableOCSPEndpointCheck", (*ClientOptions).SetDisableOCSPEndpointCheck, true, "DisableOCSPEndpointCheck", true}, + {"LoadBalanced", (*ClientOptions).SetLoadBalanced, true, "LoadBalanced", true}, } opt1, opt2, optResult := Client(), Client(), Client() @@ -95,19 
+94,20 @@ func TestClientOptions(t *testing.T) {
 			if fn.Type().NumIn() < 2 || fn.Type().In(0) != tClientOptions {
 				t.Fatal("fn argument must have a *ClientOptions as the first argument and one other argument")
 			}
-			if _, exists := tClientopts.Elem().FieldByName(tc.field); !exists {
+			if _, exists := tClientOptions.Elem().FieldByName(tc.field); !exists {
 				t.Fatalf("field (%s) does not exist in ClientOptions", tc.field)
 			}
-			opts := make([]reflect.Value, 2)
-			opts[0] = reflect.New(tClientOptions.Elem())
+			args := make([]reflect.Value, 2)
+			client := reflect.New(tClientOptions.Elem())
+			args[0] = client
 			want := reflect.ValueOf(tc.arg)
-			opts[1] = want
+			args[1] = want
 			if !want.IsValid() || !want.CanInterface() {
 				t.Fatal("arg property of test case must be valid")
 			}
-			_ = fn.Call(opts)
+			_ = fn.Call(args)
 
 			// To avoid duplication we're piggybacking on the Set* tests to make the
 			// MergeClientOptions test simpler and more thorough.
@@ -116,42 +116,17 @@ func TestClientOptions(t *testing.T) {
 			// the result option. This gives us coverage of options set by the first option, by
 			// the second, and by both.
 			if idx%2 != 0 {
-				opts[0] = reflect.ValueOf(opt1)
-				_ = fn.Call(opts)
+				args[0] = reflect.ValueOf(opt1)
+				_ = fn.Call(args)
 			}
 			if idx%2 == 0 || idx%3 == 0 {
-				opts[0] = reflect.ValueOf(opt2)
-				_ = fn.Call(opts)
+				args[0] = reflect.ValueOf(opt2)
+				_ = fn.Call(args)
 			}
-			opts[0] = reflect.ValueOf(optResult)
-			_ = fn.Call(opts)
+			args[0] = reflect.ValueOf(optResult)
+			_ = fn.Call(args)
 
-			optsValue := opts[0].Elem().FieldByName("Opts")
-
-			// Ensure the value is a slice
-			if optsValue.Kind() != reflect.Slice {
-				t.Fatalf("expected the options to be a slice")
-			}
-
-			setters := make([]func(*ClientOptions) error, optsValue.Len())
-
-			// Iterate over the reflect.Value and extract each function
-			for i := 0; i < optsValue.Len(); i++ {
-				elem := optsValue.Index(i)
-				if elem.Kind() != reflect.Func {
-					t.Fatalf("expected all elements of opts to be functions")
-				}
-
-				setters[i] = elem.Interface().(func(*ClientOptions) error)
-			}
-
-			clientopts := &ClientOptions{}
-			for _, set := range setters {
-				err := set(clientopts)
-				assert.NoError(t, err)
-			}
-
-			got := reflect.ValueOf(clientopts).Elem().FieldByName(tc.field)
+			got := client.Elem().FieldByName(tc.field)
 			if !got.IsValid() || !got.CanInterface() {
 				t.Fatal("cannot create concrete instance from retrieved field")
 			}
@@ -171,6 +146,35 @@ func TestClientOptions(t *testing.T) {
 			}
 		})
 	}
+
+	t.Run("MergeClientOptions/all set", func(t *testing.T) {
+		want := optResult
+		got := MergeClientOptions(nil, opt1, opt2)
+		if diff := cmp.Diff(
+			got, want,
+			cmp.AllowUnexported(readconcern.ReadConcern{}, writeconcern.WriteConcern{}, readpref.ReadPref{}),
+			cmp.Comparer(func(r1, r2 *bson.Registry) bool { return r1 == r2 }),
+			cmp.Comparer(func(cfg1, cfg2 *tls.Config) bool { return cfg1 == cfg2 }),
+			cmp.Comparer(func(fp1, fp2 *event.PoolMonitor) bool { return fp1 == fp2 }),
+			cmp.AllowUnexported(ClientOptions{}),
+			cmpopts.IgnoreFields(http.Client{}, "Transport"),
+		); diff != "" {
+			t.Errorf("diff:\n%s", diff)
+			t.Errorf("Merged client options do not match. got %v; want %v", got, want)
+		}
+	})
+
+	// go-cmp does not support error comparisons (https://github.com/google/go-cmp/issues/24),
+	// so use a dedicated test for the unexported err field.
+	t.Run("MergeClientOptions/err", func(t *testing.T) {
+		opt1, opt2 := Client(), Client()
+		opt1.err = errors.New("Test error")
+
+		got := MergeClientOptions(nil, opt1, opt2)
+		if got.err.Error() != "Test error" {
+			t.Errorf("Merged client options do not match.
got %v; want %v", got.err.Error(), opt1.err.Error()) + } + }) }) t.Run("direct connection validation", func(t *testing.T) { t.Run("multiple hosts", func(t *testing.T) { @@ -178,7 +182,7 @@ func TestClientOptions(t *testing.T) { testCases := []struct { name string - opts *ClientOptionsBuilder + opts *ClientOptions }{ {"hosts in URI", Client().ApplyURI("mongodb://localhost,localhost2")}, {"hosts in options", Client().SetHosts([]string{"localhost", "localhost2"})}, @@ -194,21 +198,11 @@ func TestClientOptions(t *testing.T) { t.Run("srv", func(t *testing.T) { expectedErr := errors.New("a direct connection cannot be made if an SRV URI is used") // Use a non-SRV URI and manually set the scheme because using an SRV URI would force an SRV lookup. - optsBldr := Client().ApplyURI("mongodb://localhost:27017") - - args, err := getOptions[ClientOptions](optsBldr) - assert.NoError(t, err) + opts := Client().ApplyURI("mongodb://localhost:27017") - args.connString.Scheme = connstring.SchemeMongoDBSRV + opts.connString.Scheme = connstring.SchemeMongoDBSRV - newOpts := &ClientOptionsBuilder{} - newOpts.Opts = append(newOpts.Opts, func(ca *ClientOptions) error { - *ca = *args - - return nil - }) - - err = newOpts.SetDirect(true).Validate() + err := opts.SetDirect(true).Validate() assert.NotNil(t, err, "expected error, got nil") assert.Equal(t, expectedErr.Error(), err.Error(), "expected error %v, got %v", expectedErr, err) }) @@ -216,7 +210,7 @@ func TestClientOptions(t *testing.T) { t.Run("loadBalanced validation", func(t *testing.T) { testCases := []struct { name string - opts *ClientOptionsBuilder + opts *ClientOptions err error }{ {"multiple hosts in URI", Client().ApplyURI("mongodb://foo,bar"), connstring.ErrLoadBalancedWithMultipleHosts}, @@ -243,7 +237,7 @@ func TestClientOptions(t *testing.T) { t.Run("heartbeatFrequencyMS validation", func(t *testing.T) { testCases := []struct { name string - opts *ClientOptionsBuilder + opts *ClientOptions err error }{ { @@ -297,7 +291,7 @@ func TestClientOptions(t *testing.T) { t.Run("minPoolSize validation", func(t *testing.T) { testCases := []struct { name string - opts *ClientOptionsBuilder + opts *ClientOptions err error }{ { @@ -331,7 +325,7 @@ func TestClientOptions(t *testing.T) { t.Run("srvMaxHosts validation", func(t *testing.T) { testCases := []struct { name string - opts *ClientOptionsBuilder + opts *ClientOptions err error }{ {"replica set name", Client().SetReplicaSet("foo"), connstring.ErrSRVMaxHostsWithReplicaSet}, @@ -358,7 +352,7 @@ func TestClientOptions(t *testing.T) { testCases := []struct { name string - opts *ClientOptionsBuilder + opts *ClientOptions err error }{ { @@ -393,7 +387,7 @@ func TestClientOptions(t *testing.T) { testCases := []struct { name string - opts *ClientOptionsBuilder + opts *ClientOptions err error }{ { @@ -443,7 +437,7 @@ func TestClientOptions(t *testing.T) { testCases := []struct { name string - opts *ClientOptionsBuilder + opts *ClientOptions err error }{ { @@ -536,7 +530,7 @@ func TestClientOptions(t *testing.T) { testCases := []struct { name string - opts *ClientOptionsBuilder + opts *ClientOptions err error }{ { @@ -715,24 +709,19 @@ func compareErrors(err1, err2 error) bool { return true } -func TestSetURIopts(t *testing.T) { +func TestApplyURI(t *testing.T) { t.Parallel() testCases := []struct { name string uri string wantopts *ClientOptions - - // A list of possible errors that can be returned, required to account for - // OS-specific errors. 
- wantErrs []error }{ { - name: "ParseError", - uri: "not-mongo-db-uri://", - wantopts: &ClientOptions{}, - wantErrs: []error{ - fmt.Errorf( + name: "ParseError", + uri: "not-mongo-db-uri://", + wantopts: &ClientOptions{ + err: fmt.Errorf( "error parsing uri: %w", errors.New(`scheme must be "mongodb" or "mongodb+srv"`)), }, @@ -742,9 +731,7 @@ func TestSetURIopts(t *testing.T) { uri: "mongodb://localhost/?maxStaleness=200", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, - }, - wantErrs: []error{ - fmt.Errorf("unknown read preference %v", ""), + err: fmt.Errorf("unknown read preference %v", ""), }, }, { @@ -752,9 +739,7 @@ func TestSetURIopts(t *testing.T) { uri: "mongodb://localhost/?readPreference=Primary&maxStaleness=200", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, - }, - wantErrs: []error{ - errors.New("can not specify tags, max staleness, or hedge with mode primary"), + err: errors.New("can not specify tags, max staleness, or hedge with mode primary"), }, }, { @@ -762,19 +747,7 @@ func TestSetURIopts(t *testing.T) { uri: "mongodb://localhost/?ssl=true&sslCertificateAuthorityFile=testdata/doesntexist", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, - }, - wantErrs: []error{ - &os.PathError{ - Op: "open", - Path: "testdata/doesntexist", - Err: errors.New("no such file or directory"), - }, - &os.PathError{ - Op: "open", - Path: "testdata/doesntexist", - // Windows error - Err: errors.New("The system cannot find the file specified."), //nolint:revive - }, + err: &os.PathError{Op: "open", Path: "testdata/doesntexist"}, }, }, { @@ -782,19 +755,7 @@ func TestSetURIopts(t *testing.T) { uri: "mongodb://localhost/?ssl=true&sslClientCertificateKeyFile=testdata/doesntexist", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, - }, - wantErrs: []error{ - &os.PathError{ - Op: "open", - Path: "testdata/doesntexist", - Err: errors.New("no such file or directory"), - }, - &os.PathError{ - Op: "open", - Path: "testdata/doesntexist", - // Windows error - Err: errors.New("The system cannot find the file specified."), //nolint:revive - }, + err: &os.PathError{Op: "open", Path: "testdata/doesntexist"}, }, }, { @@ -803,8 +764,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, AppName: ptrutil.Ptr[string]("awesome-example-application"), + err: nil, }, - wantErrs: nil, }, { name: "AuthMechanism", @@ -812,8 +773,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, Auth: &Credential{AuthSource: "$external", AuthMechanism: "mongodb-x509"}, + err: nil, }, - wantErrs: nil, }, { name: "AuthMechanismProperties", @@ -826,8 +787,8 @@ func TestSetURIopts(t *testing.T) { AuthMechanismProperties: map[string]string{"SERVICE_NAME": "mongodb-fake"}, Username: "foo", }, + err: nil, }, - wantErrs: nil, }, { name: "AuthSource", @@ -835,8 +796,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, Auth: &Credential{AuthSource: "random-database-example", Username: "foo"}, + err: nil, }, - wantErrs: nil, }, { name: "Username", @@ -844,15 +805,14 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, Auth: &Credential{AuthSource: "admin", Username: "foo"}, + err: nil, }, - wantErrs: nil, }, { - name: "Unescaped slash in username", - uri: "mongodb:///:pwd@localhost", - wantopts: &ClientOptions{}, - wantErrs: []error{ - fmt.Errorf("error parsing uri: %w", errors.New("unescaped slash in username")), + name: "Unescaped slash in 
username", + uri: "mongodb:///:pwd@localhost", + wantopts: &ClientOptions{ + err: fmt.Errorf("error parsing uri: %w", errors.New("unescaped slash in username")), }, }, { @@ -864,8 +824,8 @@ func TestSetURIopts(t *testing.T) { AuthSource: "admin", Username: "foo", Password: "bar", PasswordSet: true, }, + err: nil, }, - wantErrs: nil, }, { name: "Single character username and password", @@ -876,8 +836,8 @@ func TestSetURIopts(t *testing.T) { AuthSource: "admin", Username: "f", Password: "b", PasswordSet: true, }, + err: nil, }, - wantErrs: nil, }, { name: "Connect", @@ -885,8 +845,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, Direct: ptrutil.Ptr[bool](true), + err: nil, }, - wantErrs: nil, }, { name: "ConnectTimeout", @@ -894,8 +854,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, ConnectTimeout: ptrutil.Ptr[time.Duration](5 * time.Second), + err: nil, }, - wantErrs: nil, }, { name: "Compressors", @@ -904,16 +864,16 @@ func TestSetURIopts(t *testing.T) { Hosts: []string{"localhost"}, Compressors: []string{"zlib", "snappy"}, ZlibLevel: ptrutil.Ptr[int](6), + err: nil, }, - wantErrs: nil, }, { name: "DatabaseNoAuth", uri: "mongodb://localhost/example-database", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, + err: nil, }, - wantErrs: nil, }, { name: "DatabaseAsDefault", @@ -921,8 +881,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, Auth: &Credential{AuthSource: "example-database", Username: "foo"}, + err: nil, }, - wantErrs: nil, }, { name: "HeartbeatInterval", @@ -930,16 +890,16 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, HeartbeatInterval: ptrutil.Ptr[time.Duration](12 * time.Second), + err: nil, }, - wantErrs: nil, }, { name: "Hosts", uri: "mongodb://localhost:27017,localhost:27018,localhost:27019/", wantopts: &ClientOptions{ Hosts: []string{"localhost:27017", "localhost:27018", "localhost:27019"}, + err: nil, }, - wantErrs: nil, }, { name: "LocalThreshold", @@ -947,8 +907,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, LocalThreshold: ptrutil.Ptr[time.Duration](200 * time.Millisecond), + err: nil, }, - wantErrs: nil, }, { name: "MaxConnIdleTime", @@ -956,8 +916,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, MaxConnIdleTime: ptrutil.Ptr[time.Duration](5 * time.Minute), + err: nil, }, - wantErrs: nil, }, { name: "MaxPoolSize", @@ -965,8 +925,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, MaxPoolSize: ptrutil.Ptr[uint64](256), + err: nil, }, - wantErrs: nil, }, { name: "MinPoolSize", @@ -974,8 +934,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, MinPoolSize: ptrutil.Ptr[uint64](256), + err: nil, }, - wantErrs: nil, }, { name: "MaxConnecting", @@ -983,8 +943,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, MaxConnecting: ptrutil.Ptr[uint64](10), + err: nil, }, - wantErrs: nil, }, { name: "ReadConcern", @@ -992,8 +952,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, ReadConcern: readconcern.Linearizable(), + err: nil, }, - wantErrs: nil, }, { name: "ReadPreference", @@ -1001,8 +961,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, ReadPreference: 
readpref.SecondaryPreferred(), + err: nil, }, - wantErrs: nil, }, { name: "ReadPreferenceTagSets", @@ -1010,8 +970,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, ReadPreference: readpref.SecondaryPreferred(readpref.WithTags("foo", "bar")), + err: nil, }, - wantErrs: nil, }, { name: "MaxStaleness", @@ -1019,8 +979,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, ReadPreference: readpref.SecondaryPreferred(readpref.WithMaxStaleness(250 * time.Second)), + err: nil, }, - wantErrs: nil, }, { name: "RetryWrites", @@ -1028,8 +988,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, RetryWrites: ptrutil.Ptr[bool](true), + err: nil, }, - wantErrs: nil, }, { name: "ReplicaSet", @@ -1037,8 +997,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, ReplicaSet: ptrutil.Ptr[string]("rs01"), + err: nil, }, - wantErrs: nil, }, { name: "ServerSelectionTimeout", @@ -1046,16 +1006,16 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, ServerSelectionTimeout: ptrutil.Ptr[time.Duration](45 * time.Second), + err: nil, }, - wantErrs: nil, }, { name: "SocketTimeout", uri: "mongodb://localhost/?socketTimeoutMS=15000", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, + err: nil, }, - wantErrs: nil, }, { name: "TLS CACertificate", @@ -1065,8 +1025,8 @@ func TestSetURIopts(t *testing.T) { TLSConfig: &tls.Config{ RootCAs: createCertPool(t, "testdata/ca.pem"), }, + err: nil, }, - wantErrs: nil, }, { name: "TLS Insecure", @@ -1076,8 +1036,8 @@ func TestSetURIopts(t *testing.T) { TLSConfig: &tls.Config{ InsecureSkipVerify: true, }, + err: nil, }, - wantErrs: nil, }, { name: "TLS ClientCertificateKey", @@ -1087,8 +1047,8 @@ func TestSetURIopts(t *testing.T) { TLSConfig: &tls.Config{ Certificates: make([]tls.Certificate, 1), }, + err: nil, }, - wantErrs: nil, }, { name: "TLS ClientCertificateKey with password", @@ -1098,8 +1058,8 @@ func TestSetURIopts(t *testing.T) { TLSConfig: &tls.Config{ Certificates: make([]tls.Certificate, 1), }, + err: nil, }, - wantErrs: nil, }, { name: "TLS Username", @@ -1110,8 +1070,8 @@ func TestSetURIopts(t *testing.T) { AuthMechanism: "mongodb-x509", AuthSource: "$external", Username: `C=US,ST=New York,L=New York City, Inc,O=MongoDB\,OU=WWW`, }, + err: nil, }, - wantErrs: nil, }, { name: "WriteConcern J", @@ -1119,8 +1079,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, WriteConcern: writeconcern.Journaled(), + err: nil, }, - wantErrs: nil, }, { name: "WriteConcern WString", @@ -1128,8 +1088,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, WriteConcern: writeconcern.Majority(), + err: nil, }, - wantErrs: nil, }, { name: "WriteConcern W", @@ -1137,16 +1097,16 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, WriteConcern: &writeconcern.WriteConcern{W: 3}, + err: nil, }, - wantErrs: nil, }, { name: "WriteConcern WTimeout", uri: "mongodb://localhost/?wTimeoutMS=45000", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, + err: nil, }, - wantErrs: nil, }, { name: "ZLibLevel", @@ -1154,8 +1114,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, ZlibLevel: ptrutil.Ptr[int](4), + err: nil, }, - wantErrs: nil, }, { name: "TLS tlsCertificateFile and tlsPrivateKeyFile", @@ -1165,35 
+1125,32 @@ func TestSetURIopts(t *testing.T) { TLSConfig: &tls.Config{ Certificates: make([]tls.Certificate, 1), }, + err: nil, }, - wantErrs: nil, }, { - name: "TLS only tlsCertificateFile", - uri: "mongodb://localhost/?tlsCertificateFile=testdata/nopass/cert.pem", - wantopts: &ClientOptions{}, - wantErrs: []error{ - fmt.Errorf( + name: "TLS only tlsCertificateFile", + uri: "mongodb://localhost/?tlsCertificateFile=testdata/nopass/cert.pem", + wantopts: &ClientOptions{ + err: fmt.Errorf( "error validating uri: %w", errors.New("the tlsPrivateKeyFile URI option must be provided if the tlsCertificateFile option is specified")), }, }, { - name: "TLS only tlsPrivateKeyFile", - uri: "mongodb://localhost/?tlsPrivateKeyFile=testdata/nopass/key.pem", - wantopts: &ClientOptions{}, - wantErrs: []error{ - fmt.Errorf( + name: "TLS only tlsPrivateKeyFile", + uri: "mongodb://localhost/?tlsPrivateKeyFile=testdata/nopass/key.pem", + wantopts: &ClientOptions{ + err: fmt.Errorf( "error validating uri: %w", errors.New("the tlsCertificateFile URI option must be provided if the tlsPrivateKeyFile option is specified")), }, }, { - name: "TLS tlsCertificateFile and tlsPrivateKeyFile and tlsCertificateKeyFile", - uri: "mongodb://localhost/?tlsCertificateFile=testdata/nopass/cert.pem&tlsPrivateKeyFile=testdata/nopass/key.pem&tlsCertificateKeyFile=testdata/nopass/certificate.pem", - wantopts: &ClientOptions{}, - wantErrs: []error{ - fmt.Errorf( + name: "TLS tlsCertificateFile and tlsPrivateKeyFile and tlsCertificateKeyFile", + uri: "mongodb://localhost/?tlsCertificateFile=testdata/nopass/cert.pem&tlsPrivateKeyFile=testdata/nopass/key.pem&tlsCertificateKeyFile=testdata/nopass/certificate.pem", + wantopts: &ClientOptions{ + err: fmt.Errorf( "error validating uri: %w", errors.New("the sslClientCertificateKeyFile/tlsCertificateKeyFile URI option cannot be provided "+ "along with tlsCertificateFile or tlsPrivateKeyFile")), @@ -1205,8 +1162,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, DisableOCSPEndpointCheck: ptrutil.Ptr[bool](true), + err: nil, }, - wantErrs: nil, }, { name: "directConnection", @@ -1214,8 +1171,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, Direct: ptrutil.Ptr[bool](true), + err: nil, }, - wantErrs: nil, }, { name: "TLS CA file with multiple certificiates", @@ -1226,17 +1183,15 @@ func TestSetURIopts(t *testing.T) { RootCAs: createCertPool(t, "testdata/ca-with-intermediates-first.pem", "testdata/ca-with-intermediates-second.pem", "testdata/ca-with-intermediates-third.pem"), }, + err: nil, }, - wantErrs: nil, }, { name: "TLS empty CA file", uri: "mongodb://localhost/?tlsCAFile=testdata/empty-ca.pem", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, - }, - wantErrs: []error{ - errors.New("the specified CA file does not contain any valid certificates"), + err: errors.New("the specified CA file does not contain any valid certificates"), }, }, { @@ -1244,9 +1199,7 @@ func TestSetURIopts(t *testing.T) { uri: "mongodb://localhost/?tlsCAFile=testdata/ca-key.pem", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, - }, - wantErrs: []error{ - errors.New("the specified CA file does not contain any valid certificates"), + err: errors.New("the specified CA file does not contain any valid certificates"), }, }, { @@ -1254,9 +1207,7 @@ func TestSetURIopts(t *testing.T) { uri: "mongodb://localhost/?tlsCAFile=testdata/malformed-ca.pem", wantopts: &ClientOptions{ Hosts: []string{"localhost"}, - }, - wantErrs: 
[]error{ - errors.New("the specified CA file does not contain any valid certificates"), + err: errors.New("the specified CA file does not contain any valid certificates"), }, }, { @@ -1265,8 +1216,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, LoadBalanced: ptrutil.Ptr[bool](true), + err: nil, }, - wantErrs: nil, }, { name: "loadBalanced=false", @@ -1274,8 +1225,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, LoadBalanced: ptrutil.Ptr[bool](false), + err: nil, }, - wantErrs: nil, }, { name: "srvServiceName", @@ -1283,8 +1234,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost.test.build.10gen.cc:27017", "localhost.test.build.10gen.cc:27018"}, SRVServiceName: ptrutil.Ptr[string]("customname"), + err: nil, }, - wantErrs: nil, }, { name: "srvMaxHosts", @@ -1292,8 +1243,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost.test.build.10gen.cc:27017", "localhost.test.build.10gen.cc:27018"}, SRVMaxHosts: ptrutil.Ptr[int](2), + err: nil, }, - wantErrs: nil, }, { name: "GODRIVER-2263 regression test", @@ -1301,8 +1252,8 @@ func TestSetURIopts(t *testing.T) { wantopts: &ClientOptions{ Hosts: []string{"localhost"}, TLSConfig: &tls.Config{Certificates: make([]tls.Certificate, 1)}, + err: nil, }, - wantErrs: nil, }, { name: "GODRIVER-2650 X509 certificate", @@ -1315,17 +1266,15 @@ func TestSetURIopts(t *testing.T) { Username: `C=US,ST=New York,L=New York City,O=MongoDB,OU=Drivers,CN=localhost`, }, TLSConfig: &tls.Config{Certificates: make([]tls.Certificate, 1)}, + err: nil, }, - wantErrs: nil, }, { name: "ALLOWED_HOSTS cannot be specified in URI connection", uri: "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:example.com", wantopts: &ClientOptions{ HTTPClient: httputil.DefaultHTTPClient, - }, - wantErrs: []error{ - errors.New(`error validating uri: ALLOWED_HOSTS cannot be specified in the URI connection string for the "MONGODB-OIDC" auth mechanism, it must be specified through the ClientOptions directly`), + err: errors.New(`error validating uri: ALLOWED_HOSTS cannot be specified in the URI connection string for the "MONGODB-OIDC" auth mechanism, it must be specified through the ClientOptions directly`), }, }, { @@ -1335,8 +1284,8 @@ func TestSetURIopts(t *testing.T) { Hosts: []string{"example.com"}, Auth: &Credential{AuthMechanism: "MONGODB-OIDC", AuthSource: "$external", AuthMechanismProperties: map[string]string{"TOKEN_RESOURCE": "mongodb://test-cluster"}}, HTTPClient: httputil.DefaultHTTPClient, + err: nil, }, - wantErrs: nil, }, { name: "oidc azure", @@ -1347,8 +1296,8 @@ func TestSetURIopts(t *testing.T) { "ENVIRONMENT": "azureManagedIdentities", "TOKEN_RESOURCE": "mongodb://test-cluster"}}, HTTPClient: httputil.DefaultHTTPClient, + err: nil, }, - wantErrs: nil, }, { name: "oidc gcp", @@ -1359,71 +1308,40 @@ func TestSetURIopts(t *testing.T) { "ENVIRONMENT": "gcp", "TOKEN_RESOURCE": "mongodb://test-cluster"}}, HTTPClient: httputil.DefaultHTTPClient, + err: nil, }, - wantErrs: nil, }, { name: "comma in key:value pair causes error", uri: "mongodb://example.com/?authMechanismProperties=TOKEN_RESOURCE:mongodb://host1%2Chost2", wantopts: &ClientOptions{ HTTPClient: httputil.DefaultHTTPClient, - }, - wantErrs: []error{ - errors.New(`error parsing uri: invalid authMechanism property`), + err: errors.New(`error parsing uri: invalid authMechanism property`), }, }, } for _, test := range 
testCases { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() + result := Client().ApplyURI(test.uri) - // Manually add the URI and ConnString to the test expectations to avoid - // adding them in each test definition. The ConnString should only be - // recorded if there was no error while parsing. - connString, err := connstring.ParseAndValidate(test.uri) + // Manually add the URI and ConnString to the test expectations to avoid adding them in each test + // definition. The ConnString should only be recorded if there was no error while parsing. + cs, err := connstring.ParseAndValidate(test.uri) if err == nil { - test.wantopts.connString = connString + test.wantopts.connString = cs } - // Also manually add the default HTTP client if one does not exist. if test.wantopts.HTTPClient == nil { - test.wantopts.HTTPClient = http.DefaultClient - } - - // Use the setURIopts to just test that a correct error is returned. - if gotErr := setURIOpts(test.uri, &ClientOptions{}); test.wantErrs != nil { - var foundError bool - - for _, err := range test.wantErrs { - if err.Error() == gotErr.Error() { - foundError = true - - break - } - } - - assert.True(t, foundError, "expected error to be one of %v, got: %v", test.wantErrs, gotErr) - } - - // Run this test through the client.ApplyURI method to ensure that it - // remains a naive wrapper. - opts := Client().ApplyURI(test.uri) - - gotopts := &ClientOptions{} - for _, setter := range opts.Opts { - _ = setter(gotopts) + test.wantopts.HTTPClient = httputil.DefaultHTTPClient } - // We have to sort string slices in comparison, as Hosts resolved from SRV - // URIs do not have a set order. + // We have to sort string slices in comparison, as Hosts resolved from SRV URIs do not have a set order. stringLess := func(a, b string) bool { return a < b } if diff := cmp.Diff( - test.wantopts, gotopts, + test.wantopts, result, cmp.AllowUnexported(ClientOptions{}, readconcern.ReadConcern{}, writeconcern.WriteConcern{}, readpref.ReadPref{}), - // cmp.Comparer(func(r1, r2 *bsoncodec.Registry) bool { return r1 == r2 }), + cmp.Comparer(func(r1, r2 *bson.Registry) bool { return r1 == r2 }), cmp.Comparer(compareTLSConfig), cmp.Comparer(compareErrors), cmpopts.SortSlices(stringLess), diff --git a/mongo/options/distinctoptions.go b/mongo/options/distinctoptions.go index e54922711b..0b9167d5d0 100644 --- a/mongo/options/distinctoptions.go +++ b/mongo/options/distinctoptions.go @@ -13,6 +13,7 @@ package options type DistinctOptions struct { Collation *Collation Comment interface{} + Hint interface{} } // DistinctOptionsBuilder contains options to configure distinct operations. Each @@ -60,3 +61,21 @@ func (do *DistinctOptionsBuilder) SetComment(comment interface{}) *DistinctOptio return do } + +// SetHint specifies the index to use for the operation. This should either be +// the index name as a string or the index specification as a document. This +// option is only valid for MongoDB versions >= 7.1. Previous server versions +// will return an error if an index hint is specified. Distinct returns an error +// if the hint parameter is a multi-key map. The default value is nil, which +// means that no index hint will be sent. +// +// SetHint sets the Hint field. 
+func (do *DistinctOptionsBuilder) SetHint(hint interface{}) *DistinctOptionsBuilder { + do.Opts = append(do.Opts, func(opts *DistinctOptions) error { + opts.Hint = hint + + return nil + }) + + return do +} diff --git a/mongo/options/encryptoptions.go b/mongo/options/encryptoptions.go index 46d8d46fdd..5a45ac16ed 100644 --- a/mongo/options/encryptoptions.go +++ b/mongo/options/encryptoptions.go @@ -128,7 +128,7 @@ func Encrypt() *EncryptOptionsBuilder { return &EncryptOptionsBuilder{} } -// SetKeyID specifies an _id of a data key. This should be a UUID (a primitive.Binary with subtype 4). +// SetKeyID specifies an _id of a data key. This should be a UUID (a bson.Binary with subtype 4). func (e *EncryptOptionsBuilder) SetKeyID(keyID bson.Binary) *EncryptOptionsBuilder { e.Opts = append(e.Opts, func(opts *EncryptOptions) error { opts.KeyID = &keyID diff --git a/mongo/options/example_test.go b/mongo/options/example_test.go index 49b403498f..a4cb92ee11 100644 --- a/mongo/options/example_test.go +++ b/mongo/options/example_test.go @@ -37,7 +37,7 @@ func (logger *CustomLogger) Error(err error, msg string, _ ...interface{}) { fmt.Fprintf(logger, "err=%v msg=%s\n", err, msg) } -func ExampleClientOptionsBuilder_SetLoggerOptions_customLogger() { +func ExampleClientOptions_SetLoggerOptions_customLogger() { buf := bytes.NewBuffer(nil) sink := &CustomLogger{Writer: buf} diff --git a/mongo/options/findoptions.go b/mongo/options/findoptions.go index 52ad8e5313..e036b9ed64 100644 --- a/mongo/options/findoptions.go +++ b/mongo/options/findoptions.go @@ -22,6 +22,7 @@ type FindOptions struct { Max interface{} MaxAwaitTime *time.Duration Min interface{} + OplogReplay *bool Projection interface{} ReturnKey *bool ShowRecordID *bool @@ -200,6 +201,19 @@ func (f *FindOptionsBuilder) SetNoCursorTimeout(b bool) *FindOptionsBuilder { return f } +// SetOplogReplay sets the value for the OplogReplay field. OplogReplay is for internal +// replication use only and should not be set. +// +// Deprecated: This option has been deprecated in MongoDB version 4.4 and will be ignored by +// the server if it is set. +func (f *FindOptionsBuilder) SetOplogReplay(b bool) *FindOptionsBuilder { + f.Opts = append(f.Opts, func(opts *FindOptions) error { + opts.OplogReplay = &b + return nil + }) + return f +} + // SetProjection sets the value for the Projection field. Projection is a document describing // which fields will be included in the documents returned by the Find operation. The // default value is nil, which means all fields will be included. @@ -265,6 +279,7 @@ type FindOneOptions struct { Hint interface{} Max interface{} Min interface{} + OplogReplay *bool Projection interface{} ReturnKey *bool ShowRecordID *bool @@ -354,6 +369,19 @@ func (f *FindOneOptionsBuilder) SetMin(min interface{}) *FindOneOptionsBuilder { return f } +// SetOplogReplay sets the value for the OplogReplay field. OplogReplay is for internal +// replication use only and should not be set. +// +// Deprecated: This option has been deprecated in MongoDB version 4.4 and will be ignored by +// the server if it is set. +func (f *FindOneOptionsBuilder) SetOplogReplay(b bool) *FindOneOptionsBuilder { + f.Opts = append(f.Opts, func(opts *FindOneOptions) error { + opts.OplogReplay = &b + return nil + }) + return f +} + // SetProjection sets the value for the Projection field. Sets a document describing which fields // will be included in the document returned by the operation. The default value is nil, which // means all fields will be included. 
diff --git a/mongo/options/lister.go b/mongo/options/lister.go index d5c76f580d..3b3783b4b4 100644 --- a/mongo/options/lister.go +++ b/mongo/options/lister.go @@ -11,19 +11,3 @@ package options type Lister[T any] interface { List() []func(*T) error } - -func getOptions[T any](mopts Lister[T]) (*T, error) { - opts := new(T) - - for _, setterFn := range mopts.List() { - if setterFn == nil { - continue - } - - if err := setterFn(opts); err != nil { - return nil, err - } - } - - return opts, nil -} diff --git a/mongo/options/loggeroptions.go b/mongo/options/loggeroptions.go index 081721391d..3a5c0806ad 100644 --- a/mongo/options/loggeroptions.go +++ b/mongo/options/loggeroptions.go @@ -78,36 +78,20 @@ type LoggerOptions struct { MaxDocumentLength uint } -// LoggerOptionsBuilder contains options to configure a logger. Each option can -// be set through setter functions. See documentation for each setter function -// for an explanation of the option. -type LoggerOptionsBuilder struct { - Opts []func(*LoggerOptions) error -} - // Logger creates a new LoggerOptions instance. -func Logger() *LoggerOptionsBuilder { - return &LoggerOptionsBuilder{} -} - -// List returns a list of LoggerOptions setter functions. -func (opts *LoggerOptionsBuilder) List() []func(*LoggerOptions) error { - return opts.Opts +func Logger() *LoggerOptions { + return &LoggerOptions{} } // SetComponentLevel sets the LogLevel value for a LogComponent. ComponentLevels is a map of // LogComponent to LogLevel. The LogLevel for a given LogComponent will be used to determine // if a log message should be logged. -func (opts *LoggerOptionsBuilder) SetComponentLevel(component LogComponent, level LogLevel) *LoggerOptionsBuilder { - opts.Opts = append(opts.Opts, func(opts *LoggerOptions) error { - if opts.ComponentLevels == nil { - opts.ComponentLevels = map[LogComponent]LogLevel{} - } +func (opts *LoggerOptions) SetComponentLevel(component LogComponent, level LogLevel) *LoggerOptions { + if opts.ComponentLevels == nil { + opts.ComponentLevels = map[LogComponent]LogLevel{} + } - opts.ComponentLevels[component] = level - - return nil - }) + opts.ComponentLevels[component] = level return opts } @@ -115,12 +99,8 @@ func (opts *LoggerOptionsBuilder) SetComponentLevel(component LogComponent, leve // SetMaxDocumentLength sets the maximum length of a document to be logged. Sink is the // LogSink that will be used to log messages. If this is nil, the driver will use the // standard logging library. -func (opts *LoggerOptionsBuilder) SetMaxDocumentLength(maxDocumentLength uint) *LoggerOptionsBuilder { - opts.Opts = append(opts.Opts, func(opts *LoggerOptions) error { - opts.MaxDocumentLength = maxDocumentLength - - return nil - }) +func (opts *LoggerOptions) SetMaxDocumentLength(maxDocumentLength uint) *LoggerOptions { + opts.MaxDocumentLength = maxDocumentLength return opts } @@ -128,12 +108,8 @@ func (opts *LoggerOptionsBuilder) SetMaxDocumentLength(maxDocumentLength uint) * // SetSink sets the LogSink to use for logging. MaxDocumentLength is the maximum length // of a document to be logged. If the underlying document is larger than this value, it // will be truncated and appended with an ellipses "...". 
-func (opts *LoggerOptionsBuilder) SetSink(sink LogSink) *LoggerOptionsBuilder { - opts.Opts = append(opts.Opts, func(opts *LoggerOptions) error { - opts.Sink = sink - - return nil - }) +func (opts *LoggerOptions) SetSink(sink LogSink) *LoggerOptions { + opts.Sink = sink return opts } diff --git a/mongo/options/serverapioptions.go b/mongo/options/serverapioptions.go index 116041a3b3..8f38dbc9e7 100644 --- a/mongo/options/serverapioptions.go +++ b/mongo/options/serverapioptions.go @@ -29,50 +29,22 @@ type ServerAPIOptions struct { DeprecationErrors *bool } -// ServerAPIOptionsBuilder contains options to configure serverAPI operations. -// Each option can be set through setter functions. See documentation for each -// setter function for an explanation of the option. -type ServerAPIOptionsBuilder struct { - Opts []func(*ServerAPIOptions) error -} - // ServerAPI creates a new ServerAPIOptions configured with the provided // serverAPIversion. -func ServerAPI(serverAPIVersion ServerAPIVersion) *ServerAPIOptionsBuilder { - opts := &ServerAPIOptionsBuilder{} - - opts.Opts = append(opts.Opts, func(opts *ServerAPIOptions) error { - opts.ServerAPIVersion = serverAPIVersion - - return nil - }) - - return opts -} - -// List returns a list of ServerAPIOptions setter functions. -func (s *ServerAPIOptionsBuilder) List() []func(*ServerAPIOptions) error { - return s.Opts +func ServerAPI(serverAPIVersion ServerAPIVersion) *ServerAPIOptions { + return &ServerAPIOptions{ServerAPIVersion: serverAPIVersion} } // SetStrict specifies whether the server should return errors for features that are not part of the API version. -func (s *ServerAPIOptionsBuilder) SetStrict(strict bool) *ServerAPIOptionsBuilder { - s.Opts = append(s.Opts, func(opts *ServerAPIOptions) error { - opts.Strict = &strict - - return nil - }) +func (s *ServerAPIOptions) SetStrict(strict bool) *ServerAPIOptions { + s.Strict = &strict return s } // SetDeprecationErrors specifies whether the server should return errors for deprecated features. -func (s *ServerAPIOptionsBuilder) SetDeprecationErrors(deprecationErrors bool) *ServerAPIOptionsBuilder { - s.Opts = append(s.Opts, func(opts *ServerAPIOptions) error { - opts.DeprecationErrors = &deprecationErrors - - return nil - }) +func (s *ServerAPIOptions) SetDeprecationErrors(deprecationErrors bool) *ServerAPIOptions { + s.DeprecationErrors = &deprecationErrors return s } diff --git a/mongo/with_transactions_test.go b/mongo/with_transactions_test.go index 3b4d927754..0ffb3b1182 100644 --- a/mongo/with_transactions_test.go +++ b/mongo/with_transactions_test.go @@ -578,7 +578,7 @@ func TestConvenientTransactions(t *testing.T) { }) } -func setupConvenientTransactions(t *testing.T, extraClientOpts ...options.Lister[options.ClientOptions]) *Client { +func setupConvenientTransactions(t *testing.T, extraClientOpts ...*options.ClientOptions) *Client { cs := integtest.ConnString(t) poolMonitor := &event.PoolMonitor{ Event: func(evt *event.PoolEvent) { @@ -597,7 +597,7 @@ func setupConvenientTransactions(t *testing.T, extraClientOpts ...options.Lister SetWriteConcern(writeconcern.Majority()). SetPoolMonitor(poolMonitor) integtest.AddTestServerAPIVersion(baseClientOpts) - fullClientOpts := []options.Lister[options.ClientOptions]{baseClientOpts} + fullClientOpts := []*options.ClientOptions{baseClientOpts} fullClientOpts = append(fullClientOpts, extraClientOpts...) client, err := Connect(fullClientOpts...) 
diff --git a/testdata/atlas-data-lake-testing/README.rst b/testdata/atlas-data-lake-testing/README.rst deleted file mode 100644 index f748993844..0000000000 --- a/testdata/atlas-data-lake-testing/README.rst +++ /dev/null @@ -1,85 +0,0 @@ -===================== -Atlas Data Lake Tests -===================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to assert compatibility with `Atlas Data Lake `_. - -Running these integration tests will require a running ``mongohoused`` -with data available in its ``test.driverdata`` collection. See the -`ADL directory in drivers-evergreen-tools `_ -and `10gen/mongohouse README `_ -for more information. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Test Format -=========== - -The same as the `CRUD Spec Test format <../../crud/tests/README.rst#Test-Format>`_. - -Test Runner Implementation -========================== - -The same as the `CRUD Spec Test Runner Implementation <../../crud/tests#test-runner-implementation>`_, -with one notable differences: the test runner for Atlas Data Lake Testing -MUST NOT drop the collection and/or database under test. In contrast to other -CRUD tests, which insert their own data fixtures into an empty collection, data -for these tests is specified in the ``mongohoused`` configuration file. - -Prose Tests -=========== - -The following tests MUST be implemented to fully test compatibility with -Atlas Data Lake. - -#. Test that the driver properly constructs and issues a - `killCursors `_ - command to Atlas Data Lake. For this test, configure an APM listener on a - client and execute a query on the ``test.driverdata`` collection that will - leave a cursor open on the server (e.g. specify ``batchSize=2`` for a query - that would match 3+ documents). Drivers MAY iterate the cursor if necessary - to execute the initial ``find`` command but MUST NOT iterate further to avoid - executing a ``getMore``. - - Observe the CommandSucceededEvent event for the ``find`` command and extract - the cursor's ID and namespace from the response document's ``cursor.id`` and - ``cursor.ns`` fields, respectively. Destroy the cursor object and observe - a CommandStartedEvent and CommandSucceededEvent for the ``killCursors`` - command. Assert that the cursor ID and target namespace in the outgoing - command match the values from the ``find`` command's CommandSucceededEvent. - When matching the namespace, note that the ``killCursors`` field will contain - the collection name and the database may be inferred from either the ``$db`` - field or accessed via the CommandStartedEvent directly. Finally, assert that - the ``killCursors`` CommandSucceededEvent indicates that the expected cursor - was killed in the ``cursorsKilled`` field. - - Note: this test assumes that drivers only issue a ``killCursors`` command - internally when destroying a cursor that may still exist on the server. If - a driver constructs and issues ``killCursors`` commands in other ways (e.g. - public API), this test MUST be adapted to test all such code paths. - -#. Test that the driver can establish a connection with Atlas Data Lake - without authentication. For these tests, create a MongoClient using a - valid connection string without auth credentials and execute a ping - command. - -#. 
Test that the driver can establish a connection with Atlas Data Lake - with authentication. For these tests, create a MongoClient using a - valid connection string with SCRAM-SHA-1 and credentials from the - drivers-evergreen-tools ADL configuration and execute a ping command. - Repeat this test using SCRAM-SHA-256. - -Changelog -========= - -:2020-07-15: Link to CRUD test runner implementation and note that the collection - under test must not be dropped before each test. diff --git a/testdata/auth/README.rst b/testdata/auth/README.rst deleted file mode 100644 index 3bf86f4fb1..0000000000 --- a/testdata/auth/README.rst +++ /dev/null @@ -1,53 +0,0 @@ -========== -Auth Tests -========== - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the Auth Spec at least with -respect to connection string URI input. - -Drivers should do additional unit testing if there are alternate ways of -configuring credentials on a client. - -Driver must also conduct the prose tests in the Auth Spec test plan section. - -Format ------- - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``uri``: A string containing the URI to be parsed. -- ``valid:`` A boolean indicating if the URI should be considered valid. -- ``credential``: If null, the credential must not be considered configured for the - the purpose of deciding if the driver should authenticate to the topology. If non-null, - it is an object containing one or more of the following properties of a credential: - - - ``username``: A string containing the username. For auth mechanisms - that do not utilize a password, this may be the entire ``userinfo`` token - from the connection string. - - ``password``: A string containing the password. - - ``source``: A string containing the authentication database. - - ``mechanism``: A string containing the authentication mechanism. A null value for - this key is used to indicate that a mechanism wasn't specified and that mechanism - negotiation is required. Test harnesses should modify the mechanism test as needed - to assert this condition. - - ``mechanism_properties``: A document containing mechanism-specific properties. It - specifies a subset of properties that must match. If a key exists in the test data, - it must exist with the corresponding value in the credential. Other values may - exist in the credential without failing the test. - -If any key is missing, no assertion about that key is necessary. Except as -specified explicitly above, if a key is present, but the test value is null, -the observed value for that key must be uninitialized (whatever that means for -a given driver and data type). - -Implementation notes -==================== - -Testing whether a URI is valid or not should simply be a matter of checking -whether URI parsing (or MongoClient construction) raises an error or exception. - -If a credential is configured, its properties must be compared to the -``credential`` field. diff --git a/testdata/auth/mongodb-aws.rst b/testdata/auth/mongodb-aws.rst deleted file mode 100644 index 092db3f202..0000000000 --- a/testdata/auth/mongodb-aws.rst +++ /dev/null @@ -1,94 +0,0 @@ -=========== -MongoDB AWS -=========== - -There are 5 scenarios drivers MUST test: - -#. ``Regular Credentials``: Auth via an ``ACCESS_KEY_ID`` and ``SECRET_ACCESS_KEY`` pair -#. 
``EC2 Credentials``: Auth from an EC2 instance via temporary credentials assigned to the machine -#. ``ECS Credentials``: Auth from an ECS instance via temporary credentials assigned to the task -#. ``Assume Role``: Auth via temporary credentials obtained from an STS AssumeRole request -#. ``AWS Lambda``: Auth via environment variables ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN``. - -For brevity, this section gives the values ````, ```` and ```` in place of a valid access key ID, secret access key and session token (also known as a security token). Note that if these values are passed into the URI they MUST be URL encoded. Sample values are below. - -.. code-block:: - - AccessKeyId=AKIAI44QH8DHBEXAMPLE - SecretAccessKey=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - Token=AQoDYXdzEJr... -| -.. sectnum:: - -Regular credentials -====================== - -Drivers MUST be able to authenticate by providing a valid access key id and secret access key pair as the username and password, respectively, in the MongoDB URI. An example of a valid URI would be: - -.. code-block:: - - mongodb://:@localhost/?authMechanism=MONGODB-AWS -| -EC2 Credentials -=============== - -Drivers MUST be able to authenticate from an EC2 instance via temporary credentials assigned to the machine. A sample URI on an EC2 machine would be: - -.. code-block:: - - mongodb://localhost/?authMechanism=MONGODB-AWS -| -.. note:: No username, password or session token is passed into the URI. Drivers MUST query the EC2 instance endpoint to obtain these credentials. - -ECS instance -============ - -Drivers MUST be able to authenticate from an ECS container via temporary credentials. A sample URI in an ECS container would be: - -.. code-block:: - - mongodb://localhost/?authMechanism=MONGODB-AWS -| -.. note:: No username, password or session token is passed into the URI. Drivers MUST query the ECS container endpoint to obtain these credentials. - -AssumeRole -========== - -Drivers MUST be able to authenticate using temporary credentials returned from an assume role request. These temporary credentials consist of an access key ID, a secret access key, and a security token passed into the URI. A sample URI would be: - -.. code-block:: - - mongodb://:@localhost/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN: -| -AWS Lambda -========== - -Drivers MUST be able to authenticate via an access key ID, secret access key and optional session token taken from the environment variables, respectively: - -.. code-block:: - - AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY - AWS_SESSION_TOKEN -| - -Sample URIs both with and without optional session tokens set are shown below. Drivers MUST test both cases. - -.. code-block:: bash - - # without a session token - export AWS_ACCESS_KEY_ID="" - export AWS_SECRET_ACCESS_KEY="" - - URI="mongodb://localhost/?authMechanism=MONGODB-AWS" -| -.. code-block:: bash - - # with a session token - export AWS_ACCESS_KEY_ID="" - export AWS_SECRET_ACCESS_KEY="" - export AWS_SESSION_TOKEN="" - - URI="mongodb://localhost/?authMechanism=MONGODB-AWS" -| -.. note:: No username, password or session token is passed into the URI. Drivers MUST check the environment variables listed above for these values. If the session token is set Drivers MUST use it. diff --git a/testdata/change-streams/README.rst b/testdata/change-streams/README.rst deleted file mode 100644 index 29103abebd..0000000000 --- a/testdata/change-streams/README.rst +++ /dev/null @@ -1,241 +0,0 @@ -.. 
role:: javascript(code) - :language: javascript - -============== -Change Streams -============== - -.. contents:: - --------- - -Introduction -============ - -The YAML and JSON files in this directory are platform-independent tests that -drivers can use to prove their conformance to the Change Streams Spec. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Subdirectories for Test Formats -------------------------------- - -This document describes the legacy format for change streams tests. -Tests in this legacy format are located under ``./legacy/``. - -New change streams tests should be written in the `unified test format <../../unified-test-format/unified-test-format.rst>`__ -and placed under ``./unified/``. - -Spec Test Format -================ - -Each YAML file has the following keys: - -- ``database_name``: The default database -- ``collection_name``: The default collection -- ``database2_name``: Another database -- ``collection2_name``: Another collection -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some of the following fields: - - - ``description``: The name of the test. - - ``minServerVersion``: The minimum server version to run this test against. If not present, assume there is no minimum server version. - - ``maxServerVersion``: Reserved for later use - - ``failPoint``: Optional configureFailPoint command document to run to configure a fail point on the primary server. - - ``target``: The entity on which to run the change stream. Valid values are: - - - ``collection``: Watch changes on collection ``database_name.collection_name`` - - ``database``: Watch changes on database ``database_name`` - - ``client``: Watch changes on entire clusters - - ``topology``: An array of server topologies against which to run the test. - Valid topologies are ``single``, ``replicaset``, ``sharded``, and "load-balanced". - - ``changeStreamPipeline``: An array of additional aggregation pipeline stages to add to the change stream - - ``changeStreamOptions``: Additional options to add to the changeStream - - ``operations``: Array of documents, each describing an operation. 
Each document has the following fields: - - - ``database``: Database against which to run the operation - - ``collection``: Collection against which to run the operation - - ``name``: Name of the command to run - - ``arguments`` (optional): Object of arguments for the command (ex: document to insert) - - - ``expectations``: Optional list of command-started events in Extended JSON format - - ``result``: Document with ONE of the following fields: - - - ``error``: Describes an error received during the test - - ``success``: An Extended JSON array of documents expected to be received from the changeStream - -Spec Test Match Function -======================== - -The definition of MATCH or MATCHES in the Spec Test Runner is as follows: - -- MATCH takes two values, ``expected`` and ``actual`` -- Notation is "Assert [actual] MATCHES [expected] -- Assertion passes if ``expected`` is a subset of ``actual``, with the value ``42`` acting as placeholders for "any value" - -Pseudocode implementation of ``actual`` MATCHES ``expected``: - -:: - - If expected is "42" or 42: - Assert that actual exists (is not null or undefined) - Else: - Assert that actual is of the same JSON type as expected - If expected is a JSON array: - For every idx/value in expected: - Assert that actual[idx] MATCHES value - Else if expected is a JSON object: - For every key/value in expected - Assert that actual[key] MATCHES value - Else: - Assert that expected equals actual - -The expected values for ``result.success`` and ``expectations`` are written in Extended JSON. Drivers may adopt any of the following approaches to comparisons, as long as they are consistent: - -- Convert ``actual`` to Extended JSON and compare to ``expected`` -- Convert ``expected`` and ``actual`` to BSON, and compare them -- Convert ``expected`` and ``actual`` to native equivalents of JSON, and compare them - -Spec Test Runner -================ - -Before running the tests - -- Create a MongoClient ``globalClient``, and connect to the server. -When executing tests against a sharded cluster, ``globalClient`` must only connect to one mongos. This is because tests -that set failpoints will only work consistently if both the ``configureFailPoint`` and failing commands are sent to the -same mongos. - -For each YAML file, for each element in ``tests``: - -- If ``topology`` does not include the topology of the server instance(s), skip this test. -- Use ``globalClient`` to - - - Drop the database ``database_name`` - - Drop the database ``database2_name`` - - Create the database ``database_name`` and the collection ``database_name.collection_name`` - - Create the database ``database2_name`` and the collection ``database2_name.collection2_name`` - - If the the ``failPoint`` field is present, configure the fail point on the primary server. See - `Server Fail Point <../../transactions/tests#server-fail-point>`_ in the - Transactions spec test documentation for more information. - -- Create a new MongoClient ``client`` -- Begin monitoring all APM events for ``client``. (If the driver uses global listeners, filter out all events that do not originate with ``client``). Filter out any "internal" commands (e.g. ``hello`` or legacy hello) -- Using ``client``, create a changeStream ``changeStream`` against the specified ``target``. Use ``changeStreamPipeline`` and ``changeStreamOptions`` if they are non-empty. Capture any error. 
-- If there was no error, use ``globalClient`` and run every operation in ``operations`` in serial against the server until all operations have been executed or an error is thrown. Capture any error. -- If there was no error and ``result.error`` is set, iterate ``changeStream`` once and capture any error. -- If there was no error and ``result.success`` is non-empty, iterate ``changeStream`` until it returns as many changes as there are elements in the ``result.success`` array or an error is thrown. Capture any error. -- Close ``changeStream`` -- If there was an error: - - - Assert that an error was expected for the test. - - Assert that the error MATCHES ``result.error`` - -- Else: - - - Assert that no error was expected for the test - - Assert that the changes received from ``changeStream`` MATCH the results in ``result.success`` - -- If there are any ``expectations`` - - - For each (``expected``, ``idx``) in ``expectations`` - - If ``actual[idx]`` is a ``killCursors`` event, skip it and move to ``actual[idx+1]``. - - Else assert that ``actual[idx]`` MATCHES ``expected`` - - Note: the change stream test command event expectations cover a - prefix subset of all command events published by the driver. - The test runner MUST verify that, if there are N expectations, that the - first N events published by the driver match the expectations, and - MUST NOT inspect any subsequent events published by the driver. - -- Close the MongoClient ``client`` - -After running all tests - -- Close the MongoClient ``globalClient`` -- Drop database ``database_name`` -- Drop database ``database2_name`` - -Iterating the Change Stream ---------------------------- - -Although synchronous drivers must provide a `non-blocking mode of iteration <../change-streams.rst#not-blocking-on-iteration>`_, asynchronous drivers may not have such a mechanism. Those drivers with only a blocking mode of iteration should be careful not to iterate the change stream unnecessarily, as doing so could cause the test runner to block indefinitely. For this reason, the test runner procedure above advises drivers to take a conservative approach to iteration. - -If the test expects an error and one was not thrown by either creating the change stream or executing the test's operations, iterating the change stream once allows for an error to be thrown by a ``getMore`` command. If the test does not expect any error, the change stream should be iterated only until it returns as many result documents as are expected by the test. - -Testing on Sharded Clusters ---------------------------- - -When writing data on sharded clusters, majority-committed data does not always show up in the response of the first -``getMore`` command after the data is written. This is because in sharded clusters, no data from shard A may be returned -until all other shard reports an entry that sorts after the change in shard A. - -To account for this, drivers MUST NOT rely on change stream documents in certain batches. For example, if expecting two -documents in a change stream, these may not be part of the same ``getMore`` response, or even be produced in two -subsequent ``getMore`` responses. Drivers MUST allow for a ``getMore`` to produce empty batches when testing on a -sharded cluster. By default, this can take up to 10 seconds, but can be controlled by enabling the ``writePeriodicNoops`` -server parameter and configuring the ``periodNoopIntervalSecs`` parameter. Choosing lower values allows for running -change stream tests with smaller timeouts. 
- -Prose Tests -=========== - -The following tests have not yet been automated, but MUST still be tested. All tests SHOULD be run on both replica sets and sharded clusters unless otherwise specified: - -#. ``ChangeStream`` must continuously track the last seen ``resumeToken`` -#. ``ChangeStream`` will throw an exception if the server response is missing the resume token (if wire version is < 8, this is a driver-side error; for 8+, this is a server-side error) -#. After receiving a ``resumeToken``, ``ChangeStream`` will automatically resume one time on a resumable error with the initial pipeline and options, except for the addition/update of a ``resumeToken``. -#. ``ChangeStream`` will not attempt to resume on any error encountered while executing an ``aggregate`` command. Note that retryable reads may retry ``aggregate`` commands. Drivers should be careful to distinguish retries from resume attempts. Alternatively, drivers may specify ``retryReads=false`` or avoid using a `retryable error <../../retryable-reads/retryable-reads.rst#retryable-error>`_ for this test. -#. **Removed** -#. ``ChangeStream`` will perform server selection before attempting to resume, using initial ``readPreference`` -#. Ensure that a cursor returned from an aggregate command with a cursor id and an initial empty batch is not closed on the driver side. -#. The ``killCursors`` command sent during the "Resume Process" must not be allowed to throw an exception. -#. ``$changeStream`` stage for ``ChangeStream`` against a server ``>=4.0`` and ``<4.0.7`` that has not received any results yet MUST include a ``startAtOperationTime`` option when resuming a change stream. -#. **Removed** -#. For a ``ChangeStream`` under these conditions: - - - Running against a server ``>=4.0.7``. - - The batch is empty or has been iterated to the last document. - - Expected result: - - - ``getResumeToken`` must return the ``postBatchResumeToken`` from the current command response. - -#. For a ``ChangeStream`` under these conditions: - - - Running against a server ``<4.0.7``. - - The batch is empty or has been iterated to the last document. - - Expected result: - - - ``getResumeToken`` must return the ``_id`` of the last document returned if one exists. - - ``getResumeToken`` must return ``resumeAfter`` from the initial aggregate if the option was specified. - - If ``resumeAfter`` was not specified, the ``getResumeToken`` result must be empty. - -#. For a ``ChangeStream`` under these conditions: - - - The batch is not empty. - - The batch has been iterated up to but not including the last element. - - Expected result: - - - ``getResumeToken`` must return the ``_id`` of the previous document returned. - -#. For a ``ChangeStream`` under these conditions: - - - The batch is not empty. - - The batch hasn’t been iterated at all. - - Only the initial ``aggregate`` command has been executed. - - Expected result: - - - ``getResumeToken`` must return ``startAfter`` from the initial aggregate if the option was specified. - - ``getResumeToken`` must return ``resumeAfter`` from the initial aggregate if the option was specified. - - If neither the ``startAfter`` nor ``resumeAfter`` options were specified, the ``getResumeToken`` result must be empty. - - Note that this test cannot be run against sharded topologies because in that case the initial ``aggregate`` command only establishes cursors on the shards and always returns an empty ``firstBatch``. - -#. **Removed** -#. **Removed** -#. 
``$changeStream`` stage for ``ChangeStream`` started with ``startAfter`` against a server ``>=4.1.1`` that has not received any results yet MUST include a ``startAfter`` option and MUST NOT include a ``resumeAfter`` option when resuming a change stream. -#. ``$changeStream`` stage for ``ChangeStream`` started with ``startAfter`` against a server ``>=4.1.1`` that has received at least one result MUST include a ``resumeAfter`` option and MUST NOT include a ``startAfter`` option when resuming a change stream. diff --git a/testdata/client-side-encryption/README.rst b/testdata/client-side-encryption/README.rst deleted file mode 100644 index 782e511920..0000000000 --- a/testdata/client-side-encryption/README.rst +++ /dev/null @@ -1,1076 +0,0 @@ -============================ -In-Use Encryption Tests -============================ - -.. contents:: - ----- - -Introduction -============ - -This document describes the format of the driver spec tests included in the JSON -and YAML files included in this directory. - -Additional prose tests, that are not represented in the spec tests, are described -and MUST be implemented by all drivers. - -Spec Test Format -================ - -The spec tests format is an extension of `transactions spec tests `_ with some additions: - -- A ``json_schema`` to set on the collection used for operations. - -- A ``key_vault_data`` of data that should be inserted in the key vault collection before each test. - -- Introduction ``autoEncryptOpts`` to `clientOptions` - -- Addition of `$db` to command in `command_started_event` - -- Addition of `$$type` to command_started_event and outcome. - -The semantics of `$$type` is that any actual value matching the BSON type indicated by the BSON type string is considered a match. - -For example, the following matches a command_started_event for an insert of a document where `random` must be of type ``binData``:: - - - command_started_event: - command: - insert: *collection_name - documents: - - { random: { $$type: "binData" } } - ordered: true - command_name: insert - - -The values of `$$type` correspond to `these documented string representations of BSON types `_. - - -Each YAML file has the following keys: - -.. |txn| replace:: Unchanged from Transactions spec tests. - -- ``runOn`` |txn| - -- ``database_name`` |txn| - -- ``collection_name`` |txn| - -- ``data`` |txn| - -- ``json_schema`` A JSON Schema that should be set on the collection (using ``createCollection``) before each test run. - -- ``key_vault_data`` The data that should exist in the key vault collection under test before each test run. - -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some or all of the following fields: - - - ``description``: |txn| - - - ``skipReason``: |txn| - - - ``clientOptions``: Optional, parameters to pass to MongoClient(). - - - ``autoEncryptOpts``: Optional - - - ``kmsProviders`` A dictionary of KMS providers to set on the key vault ("aws" or "local") - - - ``aws`` The AWS KMS provider. An empty object. Drivers MUST fill in AWS credentials (`accessKeyId`, `secretAccessKey`) from the environment. - - - ``azure`` The Azure KMS provider credentials. An empty object. Drivers MUST fill in Azure credentials (`tenantId`, `clientId`, and `clientSecret`) from the environment. - - - ``gcp`` The GCP KMS provider credentials. An empty object. Drivers MUST fill in GCP credentials (`email`, `privateKey`) from the environment. - - - ``local`` The local KMS provider. - - - ``key`` A 96 byte local key. 
- - - ``schemaMap``: Optional, a map from namespaces to local JSON schemas. - - - ``keyVaultNamespace``: Optional, a namespace to the key vault collection. Defaults to "keyvault.datakeys". - - - ``bypassAutoEncryption``: Optional, a boolean to indicate whether or not auto encryption should be bypassed. Defaults to ``false``. - - - ``operations``: Array of documents, each describing an operation to be - executed. Each document has the following fields: - - - ``name``: |txn| - - - ``object``: |txn|. Defaults to "collection" if omitted. - - - ``collectionOptions``: |txn| - - - ``command_name``: |txn| - - - ``arguments``: |txn| - - - ``result``: |txn| - - - ``expectations``: |txn| - - - ``outcome``: |txn| - - - -Use as integration tests -======================== - -Do the following before running spec tests: - -- Start the mongocryptd process. -- Start a mongod process with **server version 4.1.9 or later**. -- Place credentials to an AWS IAM user (access key ID + secret access key) somewhere in the environment outside of tracked code. (If testing on evergreen, project variables are a good place). - -Load each YAML (or JSON) file using a Canonical Extended JSON parser. - -Then for each element in ``tests``: - -#. If the ``skipReason`` field is present, skip this test completely. -#. If the ``key_vault_data`` field is present: - - #. Drop the ``keyvault.datakeys`` collection using writeConcern "majority". - #. Insert the data specified into the ``keyvault.datakeys`` with write concern "majority". - -#. Create a MongoClient. - -#. Create a collection object from the MongoClient, using the ``database_name`` - and ``collection_name`` fields from the YAML file. Drop the collection - with writeConcern "majority". If a ``json_schema`` is defined in the test, - use the ``createCollection`` command to explicitly create the collection: - - .. code:: typescript - - {"create": , "validator": {"$jsonSchema": }} - -#. If the YAML file contains a ``data`` array, insert the documents in ``data`` - into the test collection, using writeConcern "majority". - -#. Create a **new** MongoClient using ``clientOptions``. - - #. If ``autoEncryptOpts`` includes ``aws``, ``awsTemporary``, ``awsTemporaryNoSessionToken``, - ``azure``, and/or ``gcp`` as a KMS provider, pass in credentials from the environment. - - - ``awsTemporary``, and ``awsTemporaryNoSessionToken`` require temporary - AWS credentials. These can be retrieved using the csfle `set-temp-creds.sh - `_ - script. - - - ``aws``, ``awsTemporary``, and ``awsTemporaryNoSessionToken`` are - mutually exclusive. - - ``aws`` should be substituted with: - - .. code:: javascript - - "aws": { - "accessKeyId": , - "secretAccessKey": - } - - ``awsTemporary`` should be substituted with: - - .. code:: javascript - - "aws": { - "accessKeyId": , - "secretAccessKey": - "sessionToken": - } - - ``awsTemporaryNoSessionToken`` should be substituted with: - - .. code:: javascript - - "aws": { - "accessKeyId": , - "secretAccessKey": - } - - ``gcp`` should be substituted with: - - .. code:: javascript - - "gcp": { - "email": , - "privateKey": , - } - - ``azure`` should be substituted with: - - .. code:: javascript - - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - } - - ``local`` should be substituted with: - - .. code:: javascript - - "local": { "key": } - - #. If ``autoEncryptOpts`` does not include ``keyVaultNamespace``, default it - to ``keyvault.datakeys``. - -#. 
For each element in ``operations``: - - - Enter a "try" block or your programming language's closest equivalent. - - Create a Database object from the MongoClient, using the ``database_name`` - field at the top level of the test file. - - Create a Collection object from the Database, using the - ``collection_name`` field at the top level of the test file. - If ``collectionOptions`` is present create the Collection object with the - provided options. Otherwise create the object with the default options. - - Execute the named method on the provided ``object``, passing the - arguments listed. - - If the driver throws an exception / returns an error while executing this - series of operations, store the error message and server error code. - - If the result document has an "errorContains" field, verify that the - method threw an exception or returned an error, and that the value of the - "errorContains" field matches the error string. "errorContains" is a - substring (case-insensitive) of the actual error message. - - If the result document has an "errorCodeName" field, verify that the - method threw a command failed exception or returned an error, and that - the value of the "errorCodeName" field matches the "codeName" in the - server error response. - - If the result document has an "errorLabelsContain" field, verify that the - method threw an exception or returned an error. Verify that all of the - error labels in "errorLabelsContain" are present in the error or exception - using the ``hasErrorLabel`` method. - - If the result document has an "errorLabelsOmit" field, verify that the - method threw an exception or returned an error. Verify that none of the - error labels in "errorLabelsOmit" are present in the error or exception - using the ``hasErrorLabel`` method. - - If the operation returns a raw command response, eg from ``runCommand``, - then compare only the fields present in the expected result document. - Otherwise, compare the method's return value to ``result`` using the same - logic as the CRUD Spec Tests runner. - -#. If the test includes a list of command-started events in ``expectations``, - compare them to the actual command-started events using the - same logic as the Command Monitoring Spec Tests runner. - -#. For each element in ``outcome``: - - - If ``name`` is "collection", create a new MongoClient *without encryption* - and verify that the test collection contains exactly the documents in the - ``data`` array. Ensure this find reads the latest data by using - **primary read preference** with **local read concern** even when the - MongoClient is configured with another read preference or read concern. - -The spec test MUST be run with *and* without auth. - -Prose Tests -=========== - -Tests for the ClientEncryption type are not included as part of the YAML tests. - -In the prose tests LOCAL_MASTERKEY refers to the following base64: - -.. code:: javascript - - Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk - -Perform all applicable operations on key vault collections (e.g. inserting an example data key, or running a find command) with readConcern/writeConcern "majority". - -Data key and double encryption -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First, perform the setup. - -#. Create a MongoClient without encryption enabled (referred to as ``client``). Enable command monitoring to listen for command_started events. - -#. Using ``client``, drop the collections ``keyvault.datakeys`` and ``db.coll``. 
- -#. Create the following: - - - A MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - A ``ClientEncryption`` object (referred to as ``client_encryption``) - - Configure both objects with the following KMS providers: - - .. code:: javascript - - { - "aws": { - "accessKeyId": , - "secretAccessKey": - }, - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - }, - "gcp": { - "email": , - "privateKey": , - } - "local": { "key": } - } - - Configure both objects with ``keyVaultNamespace`` set to ``keyvault.datakeys``. - - Configure the ``MongoClient`` with the following ``schema_map``: - - .. code:: javascript - - { - "db.coll": { - "bsonType": "object", - "properties": { - "encrypted_placeholder": { - "encrypt": { - "keyId": "/placeholder", - "bsonType": "string", - "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" - } - } - } - } - } - - Configure ``client_encryption`` with the ``keyVaultClient`` of the previously created ``client``. - -For each KMS provider (``aws``, ``azure``, ``gcp``, and ``local``), referred to as ``provider_name``, run the following test. - -#. Call ``client_encryption.createDataKey()``. - - - Set keyAltNames to ``["_altname"]``. - - Set the masterKey document based on ``provider_name``. - - For "aws": - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - } - - For "azure": - - .. code:: javascript - - { - "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", - "keyName": "key-name-csfle" - } - - For "gcp": - - .. code:: javascript - - { - "projectId": "devprod-drivers", - "location": "global", - "keyRing": "key-ring-csfle", - "keyName": "key-name-csfle" - } - - For "local", do not set a masterKey document. - - Expect a BSON binary with subtype 4 to be returned, referred to as ``datakey_id``. - - Use ``client`` to run a ``find`` on ``keyvault.datakeys`` by querying with the ``_id`` set to the ``datakey_id``. - - Expect that exactly one document is returned with the "masterKey.provider" equal to ``provider_name``. - - Check that ``client`` captured a command_started event for the ``insert`` command containing a majority writeConcern. - -#. Call ``client_encryption.encrypt()`` with the value "hello ", the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the ``key_id`` of ``datakey_id``. - - - Expect the return value to be a BSON binary subtype 6, referred to as ``encrypted``. - - Use ``client_encrypted`` to insert ``{ _id: "", "value": }`` into ``db.coll``. - - Use ``client_encrypted`` to run a find querying with ``_id`` of "" and expect ``value`` to be "hello ". - -#. Call ``client_encryption.encrypt()`` with the value "hello ", the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the ``key_alt_name`` of ``_altname``. - - - Expect the return value to be a BSON binary subtype 6. Expect the value to exactly match the value of ``encrypted``. - -#. Test explicit encrypting an auto encrypted field. - - - Use ``client_encrypted`` to attempt to insert ``{ "encrypted_placeholder": }`` - - Expect an exception to be thrown, since this is an attempt to auto encrypt an already encrypted value. - - - -External Key Vault Test -~~~~~~~~~~~~~~~~~~~~~~~ - -Run the following tests twice, parameterized by a boolean ``withExternalKeyVault``. - -#. Create a MongoClient without encryption enabled (referred to as ``client``). - -#. Using ``client``, drop the collections ``keyvault.datakeys`` and ``db.coll``. 
- Insert the document `external/external-key.json <../external/external-key.json>`_ into ``keyvault.datakeys``. - -#. Create the following: - - - A MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - A ``ClientEncryption`` object (referred to as ``client_encryption``) - - Configure both objects with the ``local`` KMS providers as follows: - - .. code:: javascript - - { "local": { "key": } } - - Configure both objects with ``keyVaultNamespace`` set to ``keyvault.datakeys``. - - Configure ``client_encrypted`` to use the schema `external/external-schema.json <../external/external-schema.json>`_ for ``db.coll`` by setting a schema map like: ``{ "db.coll": }`` - - If ``withExternalKeyVault == true``, configure both objects with an external key vault client. The external client MUST connect to the same - MongoDB cluster that is being tested against, except it MUST use the username ``fake-user`` and password ``fake-pwd``. - -#. Use ``client_encrypted`` to insert the document ``{"encrypted": "test"}`` into ``db.coll``. - If ``withExternalKeyVault == true``, expect an authentication exception to be thrown. Otherwise, expect the insert to succeed. - -#. Use ``client_encryption`` to explicitly encrypt the string ``"test"`` with key ID ``LOCALAAAAAAAAAAAAAAAAA==`` and deterministic algorithm. - If ``withExternalKeyVault == true``, expect an authentication exception to be thrown. Otherwise, expect the insert to succeed. - - -BSON size limits and batch splitting -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First, perform the setup. - -#. Create a MongoClient without encryption enabled (referred to as ``client``). - -#. Using ``client``, drop and create the collection ``db.coll`` configured with the included JSON schema `limits/limits-schema.json <../limits/limits-schema.json>`_. - -#. Using ``client``, drop the collection ``keyvault.datakeys``. Insert the document `limits/limits-key.json <../limits/limits-key.json>`_ - -#. Create a MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - Configure with the ``local`` KMS provider as follows: - - .. code:: javascript - - { "local": { "key": } } - - Configure with the ``keyVaultNamespace`` set to ``keyvault.datakeys``. - -Using ``client_encrypted`` perform the following operations: - -#. Insert ``{ "_id": "over_2mib_under_16mib", "unencrypted": }``. - - Expect this to succeed since this is still under the ``maxBsonObjectSize`` limit. - -#. Insert the document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_2mib", "unencrypted": < the string "a" repeated (2097152 - 2000) times > }`` - Note: limits-doc.json is a 1005 byte BSON document that encrypts to a ~10,000 byte document. - - Expect this to succeed since after encryption this still is below the normal maximum BSON document size. - Note, before auto encryption this document is under the 2 MiB limit. After encryption it exceeds the 2 MiB limit, but does NOT exceed the 16 MiB limit. - -#. Bulk insert the following: - - - ``{ "_id": "over_2mib_1", "unencrypted": }`` - - - ``{ "_id": "over_2mib_2", "unencrypted": }`` - - Expect the bulk write to succeed and split after first doc (i.e. two inserts occur). This may be verified using `command monitoring `_. - -#. 
Bulk insert the following: - - - The document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_2mib_1", "unencrypted": < the string "a" repeated (2097152 - 2000) times > }`` - - - The document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_2mib_2", "unencrypted": < the string "a" repeated (2097152 - 2000) times > }`` - - Expect the bulk write to succeed and split after first doc (i.e. two inserts occur). This may be verified using `command monitoring `_. - -#. Insert ``{ "_id": "under_16mib", "unencrypted": ``. - - Expect this to succeed since this is still (just) under the ``maxBsonObjectSize`` limit. - -#. Insert the document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_16mib", "unencrypted": < the string "a" repeated (16777216 - 2000) times > }`` - - Expect this to fail since encryption results in a document exceeding the ``maxBsonObjectSize`` limit. - -Optionally, if it is possible to mock the maxWriteBatchSize (i.e. the maximum number of documents in a batch) test that setting maxWriteBatchSize=1 and inserting the two documents ``{ "_id": "a" }, { "_id": "b" }`` with ``client_encrypted`` splits the operation into two inserts. - - -Views are prohibited -~~~~~~~~~~~~~~~~~~~~ - -#. Create a MongoClient without encryption enabled (referred to as ``client``). - -#. Using ``client``, drop and create a view named ``db.view`` with an empty pipeline. E.g. using the command ``{ "create": "view", "viewOn": "coll" }``. - -#. Create a MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - Configure with the ``local`` KMS provider as follows: - - .. code:: javascript - - { "local": { "key": } } - - Configure with the ``keyVaultNamespace`` set to ``keyvault.datakeys``. - -#. Using ``client_encrypted``, attempt to insert a document into ``db.view``. Expect an exception to be thrown containing the message: "cannot auto encrypt a view". - - -Corpus Test -~~~~~~~~~~~ - -The corpus test exhaustively enumerates all ways to encrypt all BSON value types. Note, the test data includes BSON binary subtype 4 (or standard UUID), which MUST be decoded and encoded as subtype 4. Run the test as follows. - -1. Create a MongoClient without encryption enabled (referred to as ``client``). - -2. Using ``client``, drop and create the collection ``db.coll`` configured with the included JSON schema `corpus/corpus-schema.json <../corpus/corpus-schema.json>`_. - -3. Using ``client``, drop the collection ``keyvault.datakeys``. Insert the documents `corpus/corpus-key-local.json <../corpus/corpus-key-local.json>`_, `corpus/corpus-key-aws.json <../corpus/corpus-key-aws.json>`_, `corpus/corpus-key-azure.json <../corpus/corpus-key-azure.json>`_, and `corpus/corpus-key-gcp.json <../corpus/corpus-key-gcp.json>`_. - -4. Create the following: - - - A MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - A ``ClientEncryption`` object (referred to as ``client_encryption``) - - Configure both objects with ``aws``, ``azure``, ``gcp``, and ``local`` KMS providers as follows: - - .. code:: javascript - - { - "aws": { }, - "azure": { }, - "gcp": { }, - "local": { "key": } - } - - Where LOCAL_MASTERKEY is the following base64: - - .. 
code:: javascript - - Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk - - Configure both objects with ``keyVaultNamespace`` set to ``keyvault.datakeys``. - -5. Load `corpus/corpus.json <../corpus/corpus.json>`_ to a variable named ``corpus``. The corpus contains subdocuments with the following fields: - - - ``kms`` is either ``aws``, ``azure``, ``gcp``, or ``local`` - - ``type`` is a BSON type string `names coming from here `_) - - ``algo`` is either ``rand`` or ``det`` for random or deterministic encryption - - ``method`` is either ``auto``, for automatic encryption or ``explicit`` for explicit encryption - - ``identifier`` is either ``id`` or ``altname`` for the key identifier - - ``allowed`` is a boolean indicating whether the encryption for the given parameters is permitted. - - ``value`` is the value to be tested. - - Create a new BSON document, named ``corpus_copied``. - Iterate over each field of ``corpus``. - - - If the field name is ``_id``, ``altname_aws``, ``altname_local``, ``altname_azure``, or ``altname_gcp``, copy the field to ``corpus_copied``. - - If ``method`` is ``auto``, copy the field to ``corpus_copied``. - - If ``method`` is ``explicit``, use ``client_encryption`` to explicitly encrypt the value. - - - Encrypt with the algorithm described by ``algo``. - - If ``identifier`` is ``id`` - - - If ``kms`` is ``local`` set the key_id to the UUID with base64 value ``LOCALAAAAAAAAAAAAAAAAA==``. - - If ``kms`` is ``aws`` set the key_id to the UUID with base64 value ``AWSAAAAAAAAAAAAAAAAAAA==``. - - If ``kms`` is ``azure`` set the key_id to the UUID with base64 value ``AZUREAAAAAAAAAAAAAAAAA==``. - - If ``kms`` is ``gcp`` set the key_id to the UUID with base64 value ``GCPAAAAAAAAAAAAAAAAAAA==``. - - - If ``identifier`` is ``altname`` - - - If ``kms`` is ``local`` set the key_alt_name to "local". - - If ``kms`` is ``aws`` set the key_alt_name to "aws". - - If ``kms`` is ``azure`` set the key_alt_name to "azure". - - If ``kms`` is ``gcp`` set the key_alt_name to "gcp". - - If ``allowed`` is true, copy the field and encrypted value to ``corpus_copied``. - If ``allowed`` is false. verify that an exception is thrown. Copy the unencrypted value to to ``corpus_copied``. - - -6. Using ``client_encrypted``, insert ``corpus_copied`` into ``db.coll``. - -7. Using ``client_encrypted``, find the inserted document from ``db.coll`` to a variable named ``corpus_decrypted``. Since it should have been automatically decrypted, assert the document exactly matches ``corpus``. - -8. Load `corpus/corpus_encrypted.json <../corpus/corpus-encrypted.json>`_ to a variable named ``corpus_encrypted_expected``. - Using ``client`` find the inserted document from ``db.coll`` to a variable named ``corpus_encrypted_actual``. - - Iterate over each field of ``corpus_encrypted_expected`` and check the following: - - - If the ``algo`` is ``det``, that the value equals the value of the corresponding field in ``corpus_encrypted_actual``. - - If the ``algo`` is ``rand`` and ``allowed`` is true, that the value does not equal the value of the corresponding field in ``corpus_encrypted_actual``. - - If ``allowed`` is true, decrypt the value with ``client_encryption``. Decrypt the value of the corresponding field of ``corpus_encrypted`` and validate that they are both equal. - - If ``allowed`` is false, validate the value exactly equals the value of the corresponding field of ``corpus`` (neither was encrypted). - -9. 
Repeat steps 1-8 with a local JSON schema. I.e. amend step 4 to configure the schema on ``client_encrypted`` with the ``schema_map`` option. - -Custom Endpoint Test -~~~~~~~~~~~~~~~~~~~~ - -Setup -````` - -For each test cases, start by creating two ``ClientEncryption`` objects. Recreate the ``ClientEncryption`` objects for each test case. - -Create a ``ClientEncryption`` object (referred to as ``client_encryption``) - -Configure with ``keyVaultNamespace`` set to ``keyvault.datakeys``, and a default MongoClient as the ``keyVaultClient``. - -Configure with KMS providers as follows: - -.. code:: javascript - - { - "aws": { - "accessKeyId": , - "secretAccessKey": - }, - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - "identityPlatformEndpoint": "login.microsoftonline.com:443" - }, - "gcp": { - "email": , - "privateKey": , - "endpoint": "oauth2.googleapis.com:443" - } - } - -Create a ``ClientEncryption`` object (referred to as ``client_encryption_invalid``) - -Configure with ``keyVaultNamespace`` set to ``keyvault.datakeys``, and a default MongoClient as the ``keyVaultClient``. - -Configure with KMS providers as follows: - -.. code:: javascript - - { - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - "identityPlatformEndpoint": "example.com:443" - }, - "gcp": { - "email": , - "privateKey": , - "endpoint": "example.com:443" - } - } - -Test cases -`````````` - -1. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - -2. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "kms.us-east-1.amazonaws.com" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - -3. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "kms.us-east-1.amazonaws.com:443" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - -4. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "kms.us-east-1.amazonaws.com:12345" - } - - Expect this to fail with a socket connection error. - -5. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "kms.us-east-2.amazonaws.com" - } - - Expect this to fail with an exception with a message containing the string: "us-east-1" - -6. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. 
code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "example.com" - } - - Expect this to fail with an exception with a message containing the string: "parse error" - -7. Call `client_encryption.createDataKey()` with "azure" as the provider and the following masterKey: - - .. code:: javascript - - { - "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", - "keyName": "key-name-csfle" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - - Call ``client_encryption_invalid.createDataKey()`` with the same masterKey. Expect this to fail with an exception with a message containing the string: "parse error". - -8. Call `client_encryption.createDataKey()` with "gcp" as the provider and the following masterKey: - - .. code:: javascript - - { - "projectId": "devprod-drivers", - "location": "global", - "keyRing": "key-ring-csfle", - "keyName": "key-name-csfle", - "endpoint": "cloudkms.googleapis.com:443" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - - Call ``client_encryption_invalid.createDataKey()`` with the same masterKey. Expect this to fail with an exception with a message containing the string: "parse error". - -9. Call `client_encryption.createDataKey()` with "gcp" as the provider and the following masterKey: - - .. code:: javascript - - { - "projectId": "devprod-drivers", - "location": "global", - "keyRing": "key-ring-csfle", - "keyName": "key-name-csfle", - "endpoint": "example.com:443" - } - - Expect this to fail with an exception with a message containing the string: "Invalid KMS response". - -Bypass spawning mongocryptd -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Via mongocryptdBypassSpawn -`````````````````````````` - -The following tests that setting ``mongocryptdBypassSpawn=true`` really does bypass spawning mongocryptd. - -#. Create a MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - Configure the required options. Use the ``local`` KMS provider as follows: - - .. code:: javascript - - { "local": { "key": } } - - Configure with the ``keyVaultNamespace`` set to ``keyvault.datakeys``. - - Configure ``client_encrypted`` to use the schema `external/external-schema.json <../external/external-schema.json>`_ for ``db.coll`` by setting a schema map like: ``{ "db.coll": }`` - - Configure the following ``extraOptions``: - - .. code:: javascript - - { - "mongocryptdBypassSpawn": true - "mongocryptdURI": "mongodb://localhost:27021/db?serverSelectionTimeoutMS=1000", - "mongocryptdSpawnArgs": [ "--pidfilepath=bypass-spawning-mongocryptd.pid", "--port=27021"] - } - - Drivers MAY pass a different port if they expect their testing infrastructure to be using port 27021. Pass a port that should be free. - -#. Use ``client_encrypted`` to insert the document ``{"encrypted": "test"}`` into ``db.coll``. Expect a server selection error propagated from the internal MongoClient failing to connect to mongocryptd on port 27021. - -Via bypassAutoEncryption -```````````````````````` - -The following tests that setting ``bypassAutoEncryption=true`` really does bypass spawning mongocryptd. - -#. Create a MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - Configure the required options. Use the ``local`` KMS provider as follows: - - .. 
code:: javascript - - { "local": { "key": } } - - Configure with the ``keyVaultNamespace`` set to ``keyvault.datakeys``. - - Configure with ``bypassAutoEncryption=true``. - - Configure the following ``extraOptions``: - - .. code:: javascript - - { - "mongocryptdSpawnArgs": [ "--pidfilepath=bypass-spawning-mongocryptd.pid", "--port=27021"] - } - - Drivers MAY pass a different value to ``--port`` if they expect their testing infrastructure to be using port 27021. Pass a port that should be free. - -#. Use ``client_encrypted`` to insert the document ``{"unencrypted": "test"}`` into ``db.coll``. Expect this to succeed. - -#. Validate that mongocryptd was not spawned. Create a MongoClient to localhost:27021 (or whatever was passed via ``--port``) with serverSelectionTimeoutMS=1000. Run a handshake command and ensure it fails with a server selection timeout. - -Deadlock tests -~~~~~~~~~~~~~~ - -.. _Connection Monitoring and Pooling: /source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst - -The following tests only apply to drivers that have implemented a connection pool (see the `Connection Monitoring and Pooling`_ specification). - -There are multiple parameterized test cases. Before each test case, perform the setup. - -Setup -````` - -Create a ``MongoClient`` for setup operations named ``client_test``. - -Create a ``MongoClient`` for key vault operations with ``maxPoolSize=1`` named ``client_keyvault``. Capture command started events. - -Using ``client_test``, drop the collections ``keyvault.datakeys`` and ``db.coll``. - -Insert the document `external/external-key.json <../external/external-key.json>`_ into ``keyvault.datakeys`` with majority write concern. - -Create a collection ``db.coll`` configured with a JSON schema `external/external-schema.json <../external/external-schema.json>`_ as the validator, like so: - -.. code:: typescript - - {"create": "coll", "validator": {"$jsonSchema": }} - -Create a ``ClientEncryption`` object, named ``client_encryption`` configured with: -- ``keyVaultClient``=``client_test`` -- ``keyVaultNamespace``="keyvault.datakeys" -- ``kmsProviders``=``{ "local": { "key": } }`` - -Use ``client_encryption`` to encrypt the value "string0" with ``algorithm``="AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" and ``keyAltName``="local". Store the result in a variable named ``ciphertext``. - -Proceed to run the test case. - -Each test case configures a ``MongoClient`` with automatic encryption (named ``client_encrypted``). - -Each test must assert the number of unique ``MongoClient``s created. This can be accomplished by capturing ``TopologyOpeningEvent``, or by checking command started events for a client identifier (not possible in all drivers). - -Running a test case -``````````````````` -- Create a ``MongoClient`` named ``client_encrypted`` configured as follows: - - Set ``AutoEncryptionOpts``: - - ``keyVaultNamespace="keyvault.datakeys"`` - - ``kmsProviders``=``{ "local": { "key": } }`` - - Append ``TestCase.AutoEncryptionOpts`` (defined below) - - Capture command started events. - - Set ``maxPoolSize=TestCase.MaxPoolSize`` -- If the testcase sets ``AutoEncryptionOpts.bypassAutoEncryption=true``: - - Use ``client_test`` to insert ``{ "_id": 0, "encrypted": }`` into ``db.coll``. -- Otherwise: - - Use ``client_encrypted`` to insert ``{ "_id": 0, "encrypted": "string0" }``. -- Use ``client_encrypted`` to run a ``findOne`` operation on ``db.coll``, with the filter ``{ "_id": 0 }``. -- Expect the result to be ``{ "_id": 0, "encrypted": "string0" }``. 
-- Check captured events against ``TestCase.Expectations``. -- Check the number of unique ``MongoClient``s created is equal to ``TestCase.ExpectedNumberOfClients``. - -Case 1 -`````` -- MaxPoolSize: 1 -- AutoEncryptionOpts: - - bypassAutoEncryption=false - - keyVaultClient=unset -- Expectations: - - Expect ``client_encrypted`` to have captured four ``CommandStartedEvent``: - - a listCollections to "db". - - a find on "keyvault". - - an insert on "db". - - a find on "db" -- ExpectedNumberOfClients: 2 - -Case 2 -`````` -- MaxPoolSize: 1 -- AutoEncryptionOpts: - - bypassAutoEncryption=false - - keyVaultClient=client_keyvault -- Expectations: - - Expect ``client_encrypted`` to have captured three ``CommandStartedEvent``: - - a listCollections to "db". - - an insert on "db". - - a find on "db" - - Expect ``client_keyvault`` to have captured one ``CommandStartedEvent``: - - a find on "keyvault". -- ExpectedNumberOfClients: 2 - -Case 3 -`````` -- MaxPoolSize: 1 -- AutoEncryptionOpts: - - bypassAutoEncryption=true - - keyVaultClient=unset -- Expectations: - - Expect ``client_encrypted`` to have captured three ``CommandStartedEvent``: - - a find on "db" - - a find on "keyvault". -- ExpectedNumberOfClients: 2 - -Case 4 -`````` -- MaxPoolSize: 1 -- AutoEncryptionOpts: - - bypassAutoEncryption=true - - keyVaultClient=client_keyvault -- Expectations: - - Expect ``client_encrypted`` to have captured two ``CommandStartedEvent``: - - a find on "db" - - Expect ``client_keyvault`` to have captured one ``CommandStartedEvent``: - - a find on "keyvault". -- ExpectedNumberOfClients: 1 - -Case 5 -`````` -Drivers that do not support an unlimited maximum pool size MUST skip this test. - -- MaxPoolSize: 0 -- AutoEncryptionOpts: - - bypassAutoEncryption=false - - keyVaultClient=unset -- Expectations: - - Expect ``client_encrypted`` to have captured five ``CommandStartedEvent``: - - a listCollections to "db". - - a listCollections to "keyvault". - - a find on "keyvault". - - an insert on "db". - - a find on "db" -- ExpectedNumberOfClients: 1 - -Case 6 -`````` -Drivers that do not support an unlimited maximum pool size MUST skip this test. - -- MaxPoolSize: 0 -- AutoEncryptionOpts: - - bypassAutoEncryption=false - - keyVaultClient=client_keyvault -- Expectations: - - Expect ``client_encrypted`` to have captured three ``CommandStartedEvent``: - - a listCollections to "db". - - an insert on "db". - - a find on "db" - - Expect ``client_keyvault`` to have captured one ``CommandStartedEvent``: - - a find on "keyvault". -- ExpectedNumberOfClients: 1 - -Case 7 -`````` -Drivers that do not support an unlimited maximum pool size MUST skip this test. - -- MaxPoolSize: 0 -- AutoEncryptionOpts: - - bypassAutoEncryption=true - - keyVaultClient=unset -- Expectations: - - Expect ``client_encrypted`` to have captured three ``CommandStartedEvent``: - - a find on "db" - - a find on "keyvault". -- ExpectedNumberOfClients: 1 - -Case 8 -`````` -Drivers that do not support an unlimited maximum pool size MUST skip this test. - -- MaxPoolSize: 0 -- AutoEncryptionOpts: - - bypassAutoEncryption=true - - keyVaultClient=client_keyvault -- Expectations: - - Expect ``client_encrypted`` to have captured two ``CommandStartedEvent``: - - a find on "db" - - Expect ``client_keyvault`` to have captured one ``CommandStartedEvent``: - - a find on "keyvault". -- ExpectedNumberOfClients: 1 - -KMS TLS Tests -~~~~~~~~~~~~~ - -.. _ca.pem: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/x509gen/ca.pem -.. 
_expired.pem: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/x509gen/expired.pem -.. _wrong-host.pem: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/x509gen/wrong-host.pem - -The following tests that connections to KMS servers with TLS verify peer certificates. - -The two tests below make use of mock KMS servers which can be run on Evergreen using `the mock KMS server script `_. -Drivers can set up their local Python environment for the mock KMS server by running `the virtualenv activation script `_. - -To start a mock KMS server on port 8000 with `ca.pem`_ as a CA file and `expired.pem`_ as a cert file, run the following commands from the ``.evergreen/csfle`` directory. - -.. code:: - - . ./activate_venv.sh - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 - -Setup -````` - -For both tests, do the following: - -#. Start a ``mongod`` process with **server version 4.1.9 or later**. - -#. Create a ``MongoClient`` (referred to as ``client_encrypted``) for key vault operations with ``keyVaultNamespace`` set to ``keyvault.datakeys``: - -Invalid KMS Certificate -``````````````````````` - -#. Start a mock KMS server on port 8000 with `ca.pem`_ as a CA file and `expired.pem`_ as a cert file. - -#. Call ``client_encrypted.createDataKey()`` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "127.0.0.1:8000", - } - - Expect this to fail with an exception with a message referencing an expired certificate. This message will be language dependent. - In Python, this message is "certificate verify failed: certificate has expired". In Go, this message is - "certificate has expired or is not yet valid". - -Invalid Hostname in KMS Certificate -``````````````````````````````````` - -#. Start a mock KMS server on port 8001 with `ca.pem`_ as a CA file and `wrong-host.pem`_ as a cert file. - -#. Call ``client_encrypted.createDataKey()`` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "127.0.0.1:8001", - } - - Expect this to fail with an exception with a message referencing an incorrect or unexpected host. This message will be language dependent. - In Python, this message is "certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'". In Go, this message - is "cannot validate certificate for 127.0.0.1 because it doesn't contain any IP SANs". diff --git a/testdata/client-side-operations-timeout/README.rst b/testdata/client-side-operations-timeout/README.rst deleted file mode 100644 index 1ff923f2e5..0000000000 --- a/testdata/client-side-operations-timeout/README.rst +++ /dev/null @@ -1,613 +0,0 @@ -====================================== -Client Side Operations Timeouts Tests -====================================== - -.. contents:: - ----- - -Introduction -============ - -This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests -are broken up into automated YAML/JSON tests and additional prose tests. - -Spec Tests -========== - -This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test -Runner" specification. 
Because the tests introduced in this specification are timing-based, there is a risk that some -of them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute -these tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and -another with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the ``single-node-auth.json`` -and ``single-node-auth-ssl.json`` files in the ``drivers-evergreen-tools`` repository to create these clusters. - -Prose Tests -=========== - -There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST -create a MongoClient without the ``timeoutMS`` option set (referred to as ``internalClient``). Any fail points set -during a test MUST be unset using ``internalClient`` after the test has been executed. All MongoClient instances -created for tests MUST be configured with read/write concern ``majority``, read preference ``primary``, and command -monitoring enabled to listen for ``command_started`` events. - -Multi-batch writes -~~~~~~~~~~~~~~~~~~ - -This test MUST only run against server versions 4.4 and higher. - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 2 - }, - data: { - failCommands: ["insert"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20``. -#. Using ``client``, insert 100,001 empty documents in a single ``insertMany`` call. - - - Expect this to fail with a timeout error. - -#. Verify that two ``insert`` commands were executed against ``db.coll`` as part of the ``insertMany`` call. - -maxTimeMS is not set for commands sent to mongocryptd -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This test MUST only be run against enterprise server versions 4.2 and higher. - -#. Launch a mongocryptd process on port 23000. -#. Create a MongoClient (referred to as ``client``) using the URI ``mongodb://localhost:23000/?timeoutMS=1000``. -#. Using ``client``, execute the ``{ ping: 1 }`` command against the ``admin`` database. -#. Verify via command monitoring that the ``ping`` command sent did not contain a ``maxTimeMS`` field. - -ClientEncryption -~~~~~~~~~~~~~~~~ - -Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, -``LOCAL_MASTERKEY`` refers to the following base64: - -.. code:: javascript - - Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk - -For each test, perform the following setup: - -#. Using ``internalClient``, drop and create the ``keyvault.datakeys`` collection. -#. Create a MongoClient (referred to as ``keyVaultClient``) with ``timeoutMS=10``. -#. Create a ``ClientEncryption`` object that wraps ``keyVaultClient`` (referred to as ``clientEncryption``). Configure this object with ``keyVaultNamespace`` set to ``keyvault.datakeys`` and the following KMS providers map: - - .. code:: javascript - - { - "local": { "key": } - } - -createDataKey -````````````` - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 1 - }, - data: { - failCommands: ["insert"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. 
Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider. - - - Expect this to fail with a timeout error. - -#. Verify that an ``insert`` command was executed against to ``keyvault.datakeys`` as part of the ``createDataKey`` call. - -encrypt -``````` - -#. Call ``client_encryption.createDataKey()`` with the ``local`` KMS provider. - - - Expect a BSON binary with subtype 4 to be returned, referred to as ``datakeyId``. - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 1 - }, - data: { - failCommands: ["find"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Call ``clientEncryption.encrypt()`` with the value ``hello``, the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the keyId ``datakeyId``. - - - Expect this to fail with a timeout error. - -#. Verify that a ``find`` command was executed against the ``keyvault.datakeys`` collection as part of the ``encrypt`` call. - -decrypt -``````` - -#. Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider. - - - Expect this to return a BSON binary with subtype 4, referred to as ``dataKeyId``. - -#. Call ``clientEncryption.encrypt()`` with the value ``hello``, the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the keyId ``dataKeyId``. - - - Expect this to return a BSON binary with subtype 6, referred to as ``encrypted``. - -#. Close and re-create the ``keyVaultClient`` and ``clientEncryption`` objects. - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 1 - }, - data: { - failCommands: ["find"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Call ``clientEncryption.decrypt()`` with the value ``encrypted``. - - - Expect this to fail with a timeout error. - -#. Verify that a ``find`` command was executed against the ``keyvault.datakeys`` collection as part of the ``decrypt`` call. - -Background Connection Pooling -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication -fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait -for some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events -are not published within that time. - -timeoutMS used for handshake commands -````````````````````````````````````` - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 1 - }, - data: { - failCommands: ["saslContinue"], - blockConnection: true, - blockTimeMS: 15, - appName: "timeoutBackgroundPoolTest" - } - } - -#. Create a MongoClient (referred to as ``client``) configured with the following: - - - ``minPoolSize`` of 1 - - ``timeoutMS`` of 10 - - ``appName`` of ``timeoutBackgroundPoolTest`` - - CMAP monitor configured to listen for ``ConnectionCreatedEvent`` and ``ConnectionClosedEvent`` events. - -#. Wait for a ``ConnectionCreatedEvent`` and a ``ConnectionClosedEvent`` to be published. - -timeoutMS is refreshed for each handshake command -````````````````````````````````````````````````` - -#. Using ``internalClient``, set the following fail point: - - .. 
code:: javascript - - { - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - failCommands: ["isMaster", "saslContinue"], - blockConnection: true, - blockTimeMS: 15, - appName: "refreshTimeoutBackgroundPoolTest" - } - } - -#. Create a MongoClient (referred to as ``client``) configured with the following: - - - ``minPoolSize`` of 1 - - ``timeoutMS`` of 20 - - ``appName`` of ``refreshTimeoutBackgroundPoolTest`` - - CMAP monitor configured to listen for ``ConnectionCreatedEvent`` and ``ConnectionReady`` events. - -#. Wait for a ``ConnectionCreatedEvent`` and a ``ConnectionReady`` to be published. - -Blocking Iteration Methods -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a -blocking method for cursor iteration that executes ``getMore`` commands in a loop until a document is available or an -error occurs. - -Tailable cursors -```````````````` - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. Using ``internalClient``, insert the document ``{ x: 1 }`` into ``db.coll``. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - failCommands: ["getMore"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20``. -#. Using ``client``, create a tailable cursor on ``db.coll`` with ``cursorType=tailable``. - - - Expect this to succeed and return a cursor with a non-zero ID. - -#. Call either a blocking or non-blocking iteration method on the cursor. - - - Expect this to succeed and return the document ``{ x: 1 }`` without sending a ``getMore`` command. - -#. Call the blocking iteration method on the resulting cursor. - - - Expect this to fail with a timeout error. - -#. Verify that a ``find`` command and two ``getMore`` commands were executed against the ``db.coll`` collection during the test. - -Change Streams -`````````````` - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - failCommands: ["getMore"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20``. -#. Using ``client``, use the ``watch`` helper to create a change stream against ``db.coll``. - - - Expect this to succeed and return a change stream with a non-zero ID. - -#. Call the blocking iteration method on the resulting change stream. - - - Expect this to fail with a timeout error. - -#. Verify that an ``aggregate`` command and two ``getMore`` commands were executed against the ``db.coll`` collection during the test. - -GridFS - Upload -~~~~~~~~~~~~~~~ - -Tests in this section MUST only be run against server versions 4.4 and higher. - -uploads via openUploadStream can be timed out -````````````````````````````````````````````` - -#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { times: 1 }, - data: { - failCommands: ["insert"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``. -#. 
Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database. -#. Call ``bucket.open_upload_stream()`` with the filename ``filename`` to create an upload stream (referred to as ``uploadStream``). - - - Expect this to succeed and return a non-null stream. - -#. Using ``uploadStream``, upload a single ``0x12`` byte. -#. Call ``uploadStream.close()`` to flush the stream and insert chunks. - - - Expect this to fail with a timeout error. - -Aborting an upload stream can be timed out -`````````````````````````````````````````` - -This test only applies to drivers that provide an API to abort a GridFS upload stream. - -#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { times: 1 }, - data: { - failCommands: ["delete"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``. -#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database with ``chunkSizeBytes=2``. -#. Call ``bucket.open_upload_stream()`` with the filename ``filename`` to create an upload stream (referred to as ``uploadStream``). - - - Expect this to succeed and return a non-null stream. - -#. Using ``uploadStream``, upload the bytes ``[0x01, 0x02, 0x03, 0x04]``. -#. Call ``uploadStream.abort()``. - - - Expect this to fail with a timeout error. - -GridFS - Download -~~~~~~~~~~~~~~~~~ - -This test MUST only be run against server versions 4.4 and higher. - -#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections. -#. Using ``internalClient``, insert the following document into the ``db.fs.files`` collection: - - .. code:: javascript - - { - "_id": { - "$oid": "000000000000000000000005" - }, - "length": 10, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``. -#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database. -#. Call ``bucket.open_download_stream`` with the id ``{ "$oid": "000000000000000000000005" }`` to create a download stream (referred to as ``downloadStream``). - - - Expect this to succeed and return a non-null stream. - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { times: 1 }, - data: { - failCommands: ["find"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Read from the ``downloadStream``. - - - Expect this to fail with a timeout error. - -#. Verify that two ``find`` commands were executed during the read: one against ``db.fs.files`` and another against ``db.fs.chunks``. - -Server Selection -~~~~~~~~~~~~~~~~ - -serverSelectionTimeoutMS honored if timeoutMS is not set -```````````````````````````````````````````````````````` - -#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?serverSelectionTimeoutMS=10``. - -#. Using ``client``, execute the command ``{ ping: 1 }`` against the ``admin`` database. - - - Expect this to fail with a server selection timeout error after no more than 15ms. 
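As an illustration only (this sketch is not part of the original test description), the following shows one way a driver's test suite might assert this first server selection case. It assumes the Go driver's 1.x-style ``mongo.Connect``/``RunCommand`` API and 1.x import paths; the package and test names are placeholders, and a real runner would also assert the concrete error type rather than only that an error occurred.

.. code:: go

   package csot_test // hypothetical package name

   import (
       "context"
       "testing"
       "time"

       "go.mongodb.org/mongo-driver/bson"
       "go.mongodb.org/mongo-driver/mongo"
       "go.mongodb.org/mongo-driver/mongo/options"
   )

   // TestServerSelectionTimeoutMSHonored sketches the case above:
   // serverSelectionTimeoutMS=10 with no timeoutMS set.
   func TestServerSelectionTimeoutMSHonored(t *testing.T) {
       ctx := context.Background()

       // Connecting is lazy, so the unresolvable host "invalid" is fine here.
       client, err := mongo.Connect(ctx, options.Client().
           ApplyURI("mongodb://invalid/?serverSelectionTimeoutMS=10"))
       if err != nil {
           t.Fatalf("Connect failed: %v", err)
       }
       defer func() { _ = client.Disconnect(ctx) }()

       start := time.Now()
       err = client.Database("admin").RunCommand(ctx, bson.D{{Key: "ping", Value: 1}}).Err()
       elapsed := time.Since(start)

       // The prose above expects a server selection timeout error after no more than 15ms.
       if err == nil {
           t.Fatal("expected a server selection timeout error, got nil")
       }
       if elapsed > 15*time.Millisecond {
           t.Fatalf("expected the command to fail within 15ms, took %v", elapsed)
       }
   }

The remaining three server selection cases follow the same shape; only the URI options change.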
- -timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS -`````````````````````````````````````````````````````````````````````````````````` - -#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20``. - -#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database. - - - Expect this to fail with a server selection timeout error after no more than 15ms. - -serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS -`````````````````````````````````````````````````````````````````````````````````` - -#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10``. - -#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database. - - - Expect this to fail with a server selection timeout error after no more than 15ms. - -serverSelectionTimeoutMS honored for server selection if timeoutMS=0 -```````````````````````````````````````````````````````````````````` - -#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10``. - -#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database. - - - Expect this to fail with a server selection timeout error after no more than 15ms. - -timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS -``````````````````````````````````````````````````````````````````````````````````````````````` - -This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a -username and password). - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: failCommand, - mode: { times: 1 }, - data: { - failCommands: ["saslContinue"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10`` and ``serverSelectionTimeoutMS=20``. -#. Using ``client``, insert the document ``{ x: 1 }`` into collection ``db.coll``. - - - Expect this to fail with a timeout error after no more than 15ms. - -serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS -``````````````````````````````````````````````````````````````````````````````````````````````` - -This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a -username and password). - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: failCommand, - mode: { times: 1 }, - data: { - failCommands: ["saslContinue"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20`` and ``serverSelectionTimeoutMS=10``. -#. Using ``client``, insert the document ``{ x: 1 }`` into collection ``db.coll``. - - - Expect this to fail with a timeout error after no more than 15ms. - -endSession -~~~~~~~~~~ - -This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be -run three times: once with the timeout specified via the MongoClient ``timeoutMS`` option, once with the timeout -specified via the ClientSession ``defaultTimeoutMS`` option, and once more with the timeout specified via the -``timeoutMS`` option for the ``endSession`` operation. 
In all cases, the timeout MUST be set to 10 milliseconds. - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: failCommand, - mode: { times: 1 }, - data: { - failCommands: ["abortTransaction"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) and an explicit ClientSession derived from that MongoClient (referred to as ``session``). -#. Execute the following code: - - .. code:: typescript - - coll = client.database("db").collection("coll") - session.start_transaction() - coll.insert_one({x: 1}, session=session) - -#. Using ``session``, execute ``session.end_session`` - - - Expect this to fail with a timeout error after no more than 15ms. - -Convenient Transactions -~~~~~~~~~~~~~~~~~~~~~~~ - -Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. - -timeoutMS is refreshed for abortTransaction if the callback fails -````````````````````````````````````````````````````````````````` - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: failCommand, - mode: { times: 2 }, - data: { - failCommands: ["insert", "abortTransaction"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) configured with ``timeoutMS=10`` and an explicit ClientSession derived from that MongoClient (referred to as ``session``). -#. Using ``session``, execute a ``withTransaction`` operation with the following callback: - - .. code:: typescript - - def callback() { - coll = client.database("db").collection("coll") - coll.insert_one({ _id: 1 }, session=session) - } - -#. Expect the previous ``withTransaction`` call to fail with a timeout error. -#. Verify that the following events were published during the ``withTransaction`` call: - - #. ``command_started`` and ``command_failed`` events for an ``insert`` command. - #. ``command_started`` and ``command_failed`` events for an ``abortTransaction`` command. - -Unit Tests -========== - -The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement -these if it is possible to do so using the driver's existing test infrastructure. - -- Operations should ignore ``waitQueueTimeoutMS`` if ``timeoutMS`` is also set. -- If ``timeoutMS`` is set for an operation, the remaining ``timeoutMS`` value should apply to connection checkout after a server has been selected. -- If ``timeoutMS`` is not set for an operation, ``waitQueueTimeoutMS`` should apply to connection checkout after a server has been selected. -- If a new connection is required to execute an operation, ``min(remaining computedServerSelectionTimeout, connectTimeoutMS)`` should apply to socket establishment. -- For drivers that have control over OCSP behavior, ``min(remaining computedServerSelectionTimeout, 5 seconds)`` should apply to HTTP requests against OCSP responders. -- If ``timeoutMS`` is unset, operations fail after two non-consecutive socket timeouts. -- The remaining ``timeoutMS`` value should apply to HTTP requests against KMS servers for CSFLE. -- The remaining ``timeoutMS`` value should apply to commands sent to mongocryptd as part of automatic encryption. 
-- When doing ``minPoolSize`` maintenance, ``connectTimeoutMS`` is used as the timeout for socket establishment. diff --git a/testdata/collection-management/README.rst b/testdata/collection-management/README.rst deleted file mode 100644 index 2fd8825a52..0000000000 --- a/testdata/collection-management/README.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Collection Management Tests -=========================== - -This directory contains tests for collection management. They are implemented -in the `Unified Test Format <../../unified-test-format/unified-test-format.rst>`__. diff --git a/testdata/connection-monitoring-and-pooling/README.rst b/testdata/connection-monitoring-and-pooling/README.rst deleted file mode 100644 index 016e50507d..0000000000 --- a/testdata/connection-monitoring-and-pooling/README.rst +++ /dev/null @@ -1,228 +0,0 @@ -.. role:: javascript(code) - :language: javascript - -======================================== -Connection Monitoring and Pooling (CMAP) -======================================== - -.. contents:: - --------- - -Introduction -============ - -The YAML and JSON files in this directory are platform-independent tests that -drivers can use to prove their conformance to the Connection Monitoring and Pooling (CMAP) Spec. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Common Test Format -================== - -Each YAML file has the following keys: - -- ``version``: A version number indicating the expected format of the spec tests (current version = 1) -- ``style``: A string indicating what style of tests this file contains. Contains one of the following: - - - ``"unit"``: a test that may be run without connecting to a MongoDB deployment. - - ``"integration"``: a test that MUST be run against a real MongoDB deployment. - -- ``description``: A text description of what the test is meant to assert - -Unit Test Format: -================= - -All Unit Tests have some of the following fields: - -- ``poolOptions``: If present, connection pool options to use when creating a pool; - both `standard ConnectionPoolOptions `__ - and the following test-specific options are allowed: - - - ``backgroundThreadIntervalMS``: A time interval between the end of a - `Background Thread Run `__ - and the beginning of the next Run. If a Connection Pool does not implement a Background Thread, the Test Runner MUST ignore the option. - If the option is not specified, an implementation is free to use any value it finds reasonable. - - Possible values (0 is not allowed): - - - A negative value: never begin a Run. - - A positive value: the interval between Runs in milliseconds. - -- ``operations``: A list of operations to perform. All operations support the following fields: - - - ``name``: A string describing which operation to issue. - - ``thread``: The name of the thread in which to run this operation. If not specified, runs in the default thread - -- ``error``: Indicates that the main thread is expected to error during this test. An error may include of the following fields: - - - ``type``: the type of error emitted - - ``message``: the message associated with that error - - ``address``: Address of pool emitting error - -- ``events``: An array of all connection monitoring events expected to occur while running ``operations``. 
An event may contain any of the following fields - - - ``type``: The type of event emitted - - ``address``: The address of the pool emitting the event - - ``connectionId``: The id of a connection associated with the event - - ``options``: Options used to create the pool - - ``reason``: A reason giving more information on why the event was emitted - -- ``ignore``: An array of event names to ignore - -Valid Unit Test Operations are the following: - -- ``start(target)``: Starts a new thread named ``target`` - - - ``target``: The name of the new thread to start - -- ``wait(ms)``: Sleep the current thread for ``ms`` milliseconds - - - ``ms``: The number of milliseconds to sleep the current thread for - -- ``waitForThread(target)``: wait for thread ``target`` to finish executing. Propagate any errors to the main thread. - - - ``target``: The name of the thread to wait for. - -- ``waitForEvent(event, count, timeout)``: block the current thread until ``event`` has occurred ``count`` times - - - ``event``: The name of the event - - ``count``: The number of times the event must occur (counting from the start of the test) - - ``timeout``: If specified, time out with an error after waiting for this many milliseconds without seeing the required events - -- ``label = pool.checkOut()``: call ``checkOut`` on pool, returning the checked out connection - - - ``label``: If specified, associate this label with the returned connection, so that it may be referenced in later operations - -- ``pool.checkIn(connection)``: call ``checkIn`` on pool - - - ``connection``: A string label identifying which connection to check in. Should be a label that was previously set with ``checkOut`` - -- ``pool.clear()``: call ``clear`` on Pool -- ``pool.close()``: call ``close`` on Pool -- ``pool.ready()``: call ``ready`` on Pool - - -Integration Test Format -======================= - -The integration test format is identical to the unit test format with -the addition of the following fields to each test: - -- ``runOn`` (optional): An array of server version and/or topology requirements - for which the tests can be run. If the test environment satisfies one or more - of these requirements, the tests may be executed; otherwise, this test should - be skipped. If this field is omitted, the tests can be assumed to have no - particular requirements and should be executed. Each element will have some or - all of the following fields: - - - ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the tests. If this field is omitted, it should - be assumed that there is no lower bound on the required server version. - - - ``maxServerVersion`` (optional): The maximum server version (inclusive) - against which the tests can be run successfully. If this field is omitted, - it should be assumed that there is no upper bound on the required server - version. - -- ``failPoint``: optional, a document containing a ``configureFailPoint`` - command to run against the endpoint being used for the test. - -- ``poolOptions.appName`` (optional): appName attribute to be set in connections, which will be affected by the fail point. 
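As one illustrative sketch (not the driver's actual test harness), the ``waitForEvent(event, count, timeout)`` operation described above can be implemented in Go with a counter guarded by a condition variable; the ``eventCounter`` type and its methods below are hypothetical helpers.

.. code:: go

    package cmaptest

    import (
        "fmt"
        "sync"
        "time"
    )

    // eventCounter is a hypothetical helper for the waitForEvent operation:
    // the pool's event subscriber calls Record for every emitted event, and
    // test threads block in WaitForEvent.
    type eventCounter struct {
        mu     sync.Mutex
        cond   *sync.Cond
        counts map[string]int
    }

    func newEventCounter() *eventCounter {
        ec := &eventCounter{counts: make(map[string]int)}
        ec.cond = sync.NewCond(&ec.mu)
        return ec
    }

    // Record increments the count for eventType and wakes any waiters.
    func (ec *eventCounter) Record(eventType string) {
        ec.mu.Lock()
        ec.counts[eventType]++
        ec.mu.Unlock()
        ec.cond.Broadcast()
    }

    // WaitForEvent blocks until eventType has been observed count times, or
    // until timeout elapses (a zero timeout means wait indefinitely).
    func (ec *eventCounter) WaitForEvent(eventType string, count int, timeout time.Duration) error {
        var deadline time.Time
        if timeout > 0 {
            deadline = time.Now().Add(timeout)
            // Wake waiters when the deadline passes so the loop can re-check.
            time.AfterFunc(timeout, func() {
                ec.mu.Lock()
                defer ec.mu.Unlock()
                ec.cond.Broadcast()
            })
        }
        ec.mu.Lock()
        defer ec.mu.Unlock()
        for ec.counts[eventType] < count {
            if !deadline.IsZero() && time.Now().After(deadline) {
                return fmt.Errorf("timed out waiting for %d %q events", count, eventType)
            }
            ec.cond.Wait()
        }
        return nil
    }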
- -Spec Test Match Function -======================== - -The definition of MATCH or MATCHES in the Spec Test Runner is as follows: - -- MATCH takes two values, ``expected`` and ``actual`` -- Notation is "Assert [actual] MATCHES [expected]" -- Assertion passes if ``expected`` is a subset of ``actual``, with the values ``42`` and ``"42"`` acting as placeholders for "any value" - -Pseudocode implementation of ``actual`` MATCHES ``expected``: - -:: - - If expected is "42" or 42: - Assert that actual exists (is not null or undefined) - Else: - Assert that actual is of the same JSON type as expected - If expected is a JSON array: - For every idx/value in expected: - Assert that actual[idx] MATCHES value - Else if expected is a JSON object: - For every key/value in expected - Assert that actual[key] MATCHES value - Else: - Assert that expected equals actual - -Unit Test Runner: -================= - -For the unit tests, the behavior of a Connection is irrelevant beyond the need to assert ``connection.id``. Drivers MAY use a mock connection class for testing the pool behavior in unit tests - -For each YAML file with ``style: unit``: - -- Create a Pool ``pool``, subscribe and capture any Connection Monitoring events emitted in order. - - - If ``poolOptions`` is specified, use those options to initialize the pool - - The returned pool must have an ``address`` set as a string value. - -- Process each ``operation`` in ``operations`` (on the main thread) - - - If a ``thread`` is specified, the main thread MUST schedule the operation to execute in the corresponding thread. Otherwise, execute the operation directly in the main thread. - -- If ``error`` is present - - - Assert that an actual error ``actualError`` was thrown by the main thread - - Assert that ``actualError`` MATCHES ``error`` - -- Else: - - - Assert that no errors were thrown by the main thread - -- calculate ``actualEvents`` as every Connection Event emitted whose ``type`` is not in ``ignore`` -- if ``events`` is not empty, then for every ``idx``/``expectedEvent`` in ``events`` - - - Assert that ``actualEvents[idx]`` exists - - Assert that ``actualEvents[idx]`` MATCHES ``expectedEvent`` - - -It is important to note that the ``ignore`` list is used for calculating ``actualEvents``, but is NOT used for the ``waitForEvent`` command - -Integration Test Runner -======================= - -The steps to run the integration tests are the same as those used to run the -unit tests with the following modifications: - -- The integration tests MUST be run against an actual endpoint. If the - deployment being tested contains multiple endpoints, then the runner MUST - only use one of them to run the tests against. - -- For each test, if `failPoint` is specified, its value is a - ``configureFailPoint`` command. Run the command on the admin database of the - endpoint being tested to enable the fail point. - -- At the end of each test, any enabled fail point MUST be disabled to avoid - spurious failures in subsequent tests. The fail point may be disabled like - so:: - - db.adminCommand({ - configureFailPoint: , - mode: "off" - }); - - -Prose Tests -=========== - -The following tests have not yet been automated, but MUST still be tested - -#. All ConnectionPoolOptions MUST be specified at the MongoClient level -#. All ConnectionPoolOptions MUST be the same for all pools created by a MongoClient -#. A user MUST be able to specify all ConnectionPoolOptions via a URI string -#.
A user MUST be able to subscribe to Connection Monitoring Events in a manner idiomatic to their language and driver -#. When a check out attempt fails because connection set up throws an error, - assert that a ConnectionCheckOutFailedEvent with reason="connectionError" is emitted. diff --git a/testdata/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst b/testdata/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst deleted file mode 100644 index b81b862836..0000000000 --- a/testdata/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst +++ /dev/null @@ -1,741 +0,0 @@ -================================= -Connection Monitoring and Pooling -================================= - -:Title: Connection Monitoring and Pooling -:Author: Dan Aprahamian -:Advisory Group: Jeff Yemin, Matt Broadstone -:Approvers: Bernie Hackett, Dan Pasette, Jeff Yemin, Matt Broadstone, Sam Rossi, Scott L'Hommedieu -:Status: Accepted -:Type: Standards -:Minimum Server Version: N/A -:Last Modified: June 11, 2019 -:Version: 1.1.0 - -.. contents:: - -Abstract -======== - -Drivers currently support a variety of options that allow users to configure connection pooling behavior. Users are confused by drivers supporting different subsets of these options. Additionally, drivers implement their connection pools differently, making it difficult to design cross-driver pool functionality. By unifying and codifying pooling options and behavior across all drivers, we will increase user comprehension and code base maintainability. - -META -==== - -The keywords “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be interpreted as described in `RFC 2119 `_. - -Definitions -=========== - -Endpoint -~~~~~~~~ - -For convenience, an Endpoint refers to either a **mongod** or **mongos** instance. - -Thread -~~~~~~ - -For convenience, a Thread refers to: - -- A shared-address-space process (a.k.a. a thread) in multi-threaded drivers -- An Execution Frame / Continuation in asynchronous drivers -- A goroutine in Go - -Behavioral Description -====================== - -Which Drivers this applies to -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This specification is solely concerned with drivers that implement a connection pool. A driver SHOULD implement a connection pool, but is not required to. - -Connection Pool Options -~~~~~~~~~~~~~~~~~~~~~~~ - -All drivers that implement a connection pool MUST implement and conform to the same MongoClient options. There can be slight deviation in naming to make the options idiomatic to the driver language. - -Connection Pool Behaviors -~~~~~~~~~~~~~~~~~~~~~~~~~ - -All driver connection pools MUST provide an API that allows the driver to check out a connection, check in a connection back to the pool, and clear all connections in the pool. This API is for internal use only, and SHOULD NOT be documented as a public API. - -Connection Pool Monitoring -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -All drivers that implement a connection pool MUST provide an API that allows users to subscribe to events emitted from the pool. - -Detailed Design -=============== - -.. _connection-pool-options-1: - -Connection Pool Options -~~~~~~~~~~~~~~~~~~~~~~~ - -Drivers that implement a Connection Pool MUST support the following ConnectionPoolOptions: - -.. code:: typescript - - interface ConnectionPoolOptions { - /** - * The maximum number of connections that may be associated - * with a pool at a given time. 
This includes in use and - * available connections. - * If specified, MUST be an integer >= 0. - * A value of 0 means there is no limit. - * Defaults to 100. - */ - maxPoolSize?: number; - - /** - * The minimum number of connections that MUST exist at any moment - * in a single connection pool. - * If specified, MUST be an integer >= 0. If maxPoolSize is > 0 - * then minPoolSize must be <= maxPoolSize - * Defaults to 0. - */ - minPoolSize?: number; - - /** - * The maximum amount of time a connection should remain idle - * in the connection pool before being marked idle. - * If specified, MUST be a number >= 0. - * A value of 0 means there is no limit. - * Defaults to 0. - */ - maxIdleTimeMS?: number; - } - -Additionally, Drivers that implement a Connection Pool MUST support the following ConnectionPoolOptions UNLESS that driver meets ALL of the following conditions: - -- The driver/language currently has an idiomatic timeout mechanism implemented -- The timeout mechanism conforms to `the aggressive requirement of timing out a thread in the WaitQueue <#w1dcrm950sbn>`__ - -.. code:: typescript - - interface ConnectionPoolOptions { - /** - * The maximum amount of time a thread can wait for a connection - * to become available. - * If specified, MUST be a number >= 0. - * A value of 0 means there is no limit. - * Defaults to 0. - */ - waitQueueTimeoutMS?: number; - } - -These options MUST be specified at the MongoClient level, and SHOULD be named in a manner idiomatic to the driver's language. All connection pools created by a MongoClient MUST use the same ConnectionPoolOptions. - -When parsing a mongodb connection string, a user MUST be able to specify these options using the default names specified above. - -Deprecated Options ------------------- - -The following ConnectionPoolOptions are considered deprecated. They MUST NOT be implemented if they do not already exist in a driver, and they SHOULD be deprecated and removed from drivers that implement them as early as possible: - -.. code:: typescript - - interface ConnectionPoolOptions { - /** - * The maximum number of threads that can simultaneously wait - * for a connection to become available. - */ - waitQueueSize?: number; - - /** - * An alternative way of setting waitQueueSize, it specifies - * the maximum number of threads that can wait per connection. - * waitQueueSize === waitQueueMultiple \* maxPoolSize - */ - waitQueueMultiple?: number - } - -Connection Pool Members -~~~~~~~~~~~~~~~~~~~~~~~ - -Connection ----------- - -A driver-defined wrapper around a single TCP/IP connection to an Endpoint. A Connection has the following properties: - -- **Single Endpoint:** A Connection MUST be associated with a single Endpoint. A Connection MUST NOT be associated with multiple Endpoints. -- **Single Lifetime:** A Connection MUST NOT be used after it is closed. -- **Single Owner:** A Connection MUST belong to exactly one Pool, and MUST NOT be shared across multiple pools -- **Single Track:** A Connection MUST limit itself to one request / response at a time. A Connection MUST NOT multiplex/pipeline requests to an Endpoint. -- **Monotonically Increasing ID:** A Connection MUST have an ID number associated with it. Connection IDs within a Pool MUST be assigned in order of creation, starting at 1 and increasing by 1 for each new Connection. 
-- **Valid Connection:** A connection MUST NOT be checked out of the pool until it has successfully and fully completed a MongoDB Handshake and Authentication as specified in the `Handshake `__, `OP_COMPRESSED `__, and `Authentication `__ specifications. -- **Perishable**: it is possible for a connection to become **Perished**. A connection is considered perished if any of the following are true: - - - **Stale:** The connection's generation does not match the generation of the parent pool - - **Idle:** The connection is currently available and readyToUse, and has been for longer than **maxIdleTimeMS**. - - **Errored:** The connection has experienced an error that indicates the connection is no longer recommended for use. Examples include, but are not limited to: - - - Network Error - - Network Timeout - - Endpoint closing the connection - - Driver-Side Timeout - - Wire-Protocol Error - -.. code:: typescript - - interface Connection { - /** - * An id number associated with the connection - */ - id: number; - - /** - * The address of the pool that owns this connection - */ - address: string; - - /** - * An integer representing the “generation” of the pool - * when this connection was created - */ - generation: number; - } - -WaitQueue ---------- - -A concept that represents pending requests for connections. When a thread requests a Connection from a Pool, the thread enters the Pool's WaitQueue. A thread stays in the WaitQueue until it either receives a Connection or times out. A WaitQueue has the following traits: - -- **Thread-Safe**: When multiple threads attempt to enter or exit a WaitQueue, they do so in a thread-safe manner. -- **Ordered/Fair**: When connections are made available, they are issued out to threads in the order that the threads entered the WaitQueue. -- **Timeout aggressively:** If **waitQueueTimeoutMS** is set, members of a WaitQueue MUST timeout if they are enqueued for longer than waitQueueTimeoutMS. Members of a WaitQueue MUST timeout aggressively, and MUST leave the WaitQueue immediately upon timeout. - -The implementation details of a WaitQueue are left to the driver. -Example implementations include: - -- A fair Semaphore -- A Queue of callbacks - -Connection Pool ---------------- - -A driver-defined entity that encapsulates all non-monitoring connections associated with a single Endpoint. The pool has the following properties: - -- **Thread Safe:** All Pool behaviors MUST be thread safe. -- **Not Fork-Safe:** A Pool is explicitly not fork-safe. If a Pool detects that it is being used by a forked process, it MUST immediately clear itself and update its pid -- **Single Owner:** A Pool MUST be associated with exactly one Endpoint, and MUST NOT be shared between Endpoints. -- **Emit Events:** A Pool MUST emit pool events when dictated by this spec (see `Connection Pool Monitoring `__). Users MUST be able to subscribe to emitted events in a manner idiomatic to their language and driver. -- **Closeable:** A Pool MUST be able to be manually closed. When a Pool is closed, the following behaviors change: - - - Checking in a Connection to the Pool automatically closes the connection - - Attempting to check out a Connection from the Pool results in an Error - -- **Capped:** a pool is capped if **maxPoolSize** is set to a non-zero value. If a pool is capped, then its total number of connections (including available and in use) MUST NOT exceed **maxPoolSize** - -..
code:: typescript - - interface ConnectionPool { - /** - * The Queue of threads waiting for a connection to be available - */ - waitQueue: WaitQueue; - - /** - * A generation number representing the SDAM generation of the pool - */ - generation: number; - - /** - * An integer expressing how many total connections - * (active + in use) the pool currently has - */ - totalConnectionCount: number; - - /** - * An integer expressing how many connections are currently - * available in the pool. - */ - availableConnectionCount: number; - - /** - * Returns a connection for use - */ - checkOut(): Connection; - - /** - * Check in a connection back to the connection pool - */ - checkIn(connection: Connection): void; - - /** - * Mark all current connections as stale. - */ - clear(): void; - - /** - * Closes the pool, preventing the pool from creating and returning new Connections - */ - close(): void; - } - -.. _connection-pool-behaviors-1: - -Connection Pool Behaviors -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Creating a Connection Pool --------------------------- - -Pool creation is mostly an implementation detail specific to the driver language. If minPoolSize is set, the pool MUST immediately create enough connections such that totalConnections >= minPoolSize. These connections MUST be created in a non-blocking manner. Drivers SHOULD additionally connect these connections if it is possible to do so in a non-blocking manner, either via the use of a Background Thread or via asynchronous I/O. - -.. code:: - - set generation to 0 - emit PoolCreatedEvent - if minPoolSize is set: - while totalConnectionCount < minPoolSize: - create connection - # If this can be done without blocking, ensure that connection - # is ready to use - - -Closing a Connection Pool -------------------------- - -When a pool is closed, it MUST first close all available connections in that pool. This results in the following behavior changes: - -- In use connections MUST be closed when they are checked in to the closed pool. -- Attempting to check out a connection MUST result in an error. - -.. code:: - - mark pool as CLOSED - for connection in availableConnections: - close connection - emit PoolClosedEvent - -Creating a Connection (Internal Implementation) ------------------------------------------------ - -When creating a connection, the initial Connection is in a “set up” state. This only creates a “virtual” connection, and performs no I/O. The Connection MUST NOT be made available until it has completed its connection as described `here <#connecting-a-connection-internal-implementation>`__. - -.. code:: - - connection = new Connection() - mark connection as available - emit ConnectionCreatedEvent - return connection - -Connecting a Connection (Internal Implementation) -------------------------------------------------- - -Before a connection can be returned outside of the pool, it must be “set up”. This process involves performing the initial handshake, handling OP_COMPRESSED, and performing authentication. - -.. code:: - - try: - connect connection via TCP / TLS - perform connection handshake - handle OP_COMPRESSED - perform connection authentication - mark connection as readyToUse - emit ConnectionReadyEvent - return connection - except error: - close connection - throw error # Propagate error in manner idiomatic to language. 
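As a rough sketch only, the create/connect split described above might look like the following in Go; the ``connection`` type, the ``publish`` callback, and the ``performHandshake`` helper are hypothetical stand-ins rather than the Go driver's actual internals.

.. code:: go

    package pool

    import (
        "context"
        "net"
    )

    // connection is a hypothetical stand-in for a driver connection.
    type connection struct {
        id         int
        addr       string
        generation int
        readyToUse bool
        nc         net.Conn
    }

    // performHandshake is a placeholder for the MongoDB handshake,
    // OP_COMPRESSED negotiation, and authentication steps described above.
    func performHandshake(ctx context.Context, c *connection) error { return nil }

    // connect performs the I/O half of connection setup. It is kept separate
    // from connection creation so that no pool-wide lock is held while dialing,
    // handshaking, or authenticating.
    func (c *connection) connect(ctx context.Context, publish func(event string)) error {
        nc, err := (&net.Dialer{}).DialContext(ctx, "tcp", c.addr)
        if err == nil {
            c.nc = nc
            err = performHandshake(ctx, c)
        }
        if err != nil {
            if c.nc != nil {
                c.nc.Close()
            }
            publish("ConnectionClosed")
            return err // propagate in a manner idiomatic to the language
        }
        c.readyToUse = true
        publish("ConnectionReady")
        return nil
    }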
- - -Closing a Connection (Internal Implementation) ----------------------------------------------- - -When a Connection is closed, it MUST first be marked as closed, removing it from being counted as available, in use, or setup. Once the connection is marked as closed, the Connection can perform whatever teardown is necessary. The Driver SHOULD perform the teardown in a non-blocking manner. - -.. code:: - - mark connection as closed - emit ConnectionClosedEvent - - # The following can happen at a later time - connection.close() - -Checking Out a Connection -------------------------- - -A Pool MUST have a method of allowing the driver to check out a Connection. Checking out a Connection involves entering the WaitQueue, and waiting for a Connection to become available. If the thread times out in the WaitQueue, an error is thrown. - -If, in the process of iterating available connections in the pool by the checkOut method, a perished connection is encountered, such a connection MUST be closed and the iteration of available connections MUST continue until either a non-perished available connection is found or the list of available connections is exhausted. If no connections are available and the total number of connections is less than maxPoolSize, the pool MUST create and return a new Connection. - -If the pool is closed, any attempt to check out a connection MUST throw an Error, and any items in the waitQueue MUST be removed from the waitQueue and throw an Error. - -If minPoolSize is set, the Connection Pool must have at least minPoolSize total connections. If the pool does not implement a background thread as specified here, the checkOut method is responsible for ensuring this requirement. - -A Connection MUST NOT be checked out until it is readyToUse. In addition, the Pool MUST NOT block other threads from checking out connections while waiting for a connection to be readyToUse. - - -.. code:: - - connection = Null - emit ConnectionCheckOutStartedEvent - try: - enter WaitQueue - wait until at top of wait queue - # Note that in a lock-based implementation of the wait queue would - # only allow one thread in the following block at a time - while connection is Null: - if a connection is available: - while connection is Null and a connection is available: - connection = next available connection - if connection is perished: - close connection - connection = Null - else if totalConnectionCount < maxPoolSize: - connection = create connection - # If there is no background thread, the pool MUST ensure that - # at least minPoolSize connections are in the pool. - # This SHOULD be done in a non-blocking manner - while totalConnectionCount < minPoolSize: - create connection - except pool is closed: - emit ConnectionCheckOutFailedEvent(reason="poolClosed") - throw PoolClosedError - except timeout: - emit ConnectionCheckOutFailedEvent(reason="timeout") - throw WaitQueueTimeoutError - finally: - # This must be done in all drivers - leave wait queue - - # If the connection has not been connected yet, the connection - # (TCP, TLS, handshake, compression, and auth) must be performed - # before the connection is returned. This MUST NOT block other threads - # from acquiring connections. 
- if connection is not readyToUse: - try: - set up connection - except set up connection error: - emit ConnectionCheckOutFailedEvent(reason="error") - throw - - mark connection as in use - emit ConnectionCheckedOutEvent - return connection - -Checking In a Connection ------------------------- - -A Pool MUST have a method of allowing the driver to check in a Connection. The driver MUST NOT be allowed to check in a Connection to a Pool that did not create that Connection, and MUST throw an Error if this is attempted. - -When the Connection is checked in, it is closed if any of the following are true: - -- The connection is perished. -- The pool has been closed. - -Otherwise, the connection is marked as available. - -.. code:: - - emit ConnectionCheckedInEvent - if connection is perished OR pool is closed: - close connection - else: - mark connection as available - -Clearing a Connection Pool --------------------------- - -A Pool MUST have a method of clearing all Connections when instructed. Rather than iterating through every Connection, this method should simply increment the generation of the Pool, implicitly marking all current connections as stale. The checkOut and checkIn algorithms will handle clearing out stale connections. If a user is subscribed to Connection Monitoring events, a PoolClearedEvent MUST be emitted after incrementing the generation. - -Forking -------- - -A Connection is explicitly not fork-safe. The proper behavior in the case of a fork is to ResetAfterFork by: - -- clear all Connection Pools in the child process -- closing all Connections in the child-process. - -Drivers that support forking MUST document that connections to an Endpoint are not fork-safe, and document the proper way to ResetAfterFork in the driver. - -Drivers MAY aggressively ResetAfterFork if the driver detects it has been forked. - -Optional Behaviors ------------------- - -The following features of a Connection Pool SHOULD be implemented if they make sense in the driver and driver's language. - -Background Thread -^^^^^^^^^^^^^^^^^ - -A Pool SHOULD have a background Thread that is responsible for -monitoring the state of all available connections. This background -thread SHOULD - -- Create and connect connections to ensure that the pool always satisfies **minPoolSize** -- Remove and close perished available connections. - -withConnection -^^^^^^^^^^^^^^ - -A Pool SHOULD implement a scoped resource management mechanism idiomatic to their language to prevent Connections from not being checked in. Examples include `Python's "with" statement `__ and `C#'s "using" statement `__. If implemented, drivers SHOULD use this method as the default method of checking out and checking in Connections. - -.. _connection-pool-monitoring-1: - -Connection Pool Monitoring -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -All drivers that implement a connection pool MUST provide an API that allows users to subscribe to events emitted from the pool. If a user subscribes to Connection Monitoring events, these events MUST be emitted when specified in “Connection Pool Behaviors”. Events SHOULD be created and subscribed to in a manner idiomatic to their language and driver. - -Events ------- - - -.. code:: typescript - - /** - * Emitted when a Connection Pool is created - */ - interface PoolCreatedEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting to connect to. - */ - address: string; - - /** - * Any non-default pool options that were set on this Connection Pool. 
- */ - options: {...} - } - - /** - * Emitted when a Connection Pool is cleared - */ - interface PoolClearedEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting to connect to. - */ - address: string; - } - - /** - * Emitted when a Connection Pool is closed - */ - interface PoolClosedEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting to connect to. - */ - address: string; - } - - /** - * Emitted when a Connection Pool creates a Connection object. - * NOTE: This does not mean that the connection is ready for use. - */ - interface ConnectionCreatedEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting to connect to. - */ - address: string; - - /** - * The ID of the Connection - */ - connectionId: number; - } - - /** - * Emitted when a Connection has finished its setup, and is now ready to use - */ - interface ConnectionReadyEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting to connect to. - */ - address: string; - - /** - * The ID of the Connection - */ - connectionId: number; - } - - /** - * Emitted when a Connection Pool closes a Connection - */ - interface ConnectionClosedEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting to connect to. - */ - address: string; - - /** - * The ID of the Connection - */ - connectionId: number; - - /** - * A reason explaining why this connection was closed. - * Can be implemented as a string or enum. - * Current valid values are: - * - "stale": The pool was cleared, making the connection no longer valid - * - "idle": The connection became stale by being available for too long - * - "error": The connection experienced an error, making it no longer valid - * - "poolClosed": The pool was closed, making the connection no longer valid - */ - reason: string|Enum; - } - - /** - * Emitted when the driver starts attempting to check out a connection - */ - interface ConnectionCheckOutStartedEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting - * to connect to. - */ - address: string; - } - - /** - * Emitted when the driver's attempt to check out a connection fails - */ - interface ConnectionCheckOutFailedEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting to connect to. - */ - address: string; - - /** - * A reason explaining why connection check out failed. - * Can be implemented as a string or enum. - * Current valid values are: - * - "poolClosed": The pool was previously closed, and cannot provide new connections - * - "timeout": The connection check out attempt exceeded the specified timeout - * - "connectionError": The connection check out attempt experienced an error while setting up a new connection - */ - reason: string|Enum; - } - - /** - * Emitted when the driver successfully checks out a Connection - */ - interface ConnectionCheckedOutEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting to connect to. - */ - address: string; - - /** - * The ID of the Connection - */ - connectionId: number; - } - - /** - * Emitted when the driver checks in a Connection back to the Connection Pool - */ - interface ConnectionCheckedInEvent { - /** - * The ServerAddress of the Endpoint the pool is attempting to connect to. - */ - address: string; - - /** - * The ID of the Connection - */ - connectionId: number; - } - -Connection Pool Errors -~~~~~~~~~~~~~~~~~~~~~~ - -A connection pool throws errors in specific circumstances. These Errors -MUST be emitted by the pool. 
Errors SHOULD be created and dispatched in -a manner idiomatic to the Driver and Language. - -.. code:: typescript - - /** - * Thrown when the driver attempts to check out a - * Connection from a closed Connection Pool - */ - interface PoolClosedError { - message: 'Attempted to check out a connection from closed connection pool'; - address: ; - } - - /** - * Thrown when a driver times out when attempting to check out - * a Connection from a Pool - */ - interface WaitQueueTimeoutError { - message: 'Timed out while checking out a connection from connection pool'; - address: ; - } - -Test Plan -========= - -See `tests/README.rst `_ - -Design Rationale -================ - -Why do we set minPoolSize across all members of a replicaSet, when most traffic will be against a Primary? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Currently, we are attempting to codify our current pooling behavior with minimal changes, and minPoolSize is currently uniform across all members of a replicaSet. This has the benefit of offsetting connection swarming during a Primary Step-Down, which will be further addressed in our `Advanced Pooling Behaviors <#advanced-pooling-behaviors>`__. - -Why do we have separate ConnectionCreated and ConnectionReady events, but only one ConnectionClosed event? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -ConnectionCreated and ConnectionReady each involve different state changes in the pool. - -- ConnectionCreated adds a new “setting-up” connection, meaning the totalConnectionCount increases by one -- ConnectionReady establishes that the connection is ready for use, meaning the availableConnectionCount increases by one - -ConnectionClosed indicates that the connection is no longer a member of the pool, decrementing totalConnectionCount and potentially availableConnectionCount. After this point, the connection is no longer a part of the pool. Further hypothetical events would not indicate a change to the state of the pool, so they are not specified here. - -Why are waitQueueSize and waitQueueMultiple deprecated? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -These options are not implemented across many drivers. Additionally, they have the effect of prioritizing older requests over newer requests, which is not necessarily the behavior that users want. They can also result in cases where queue access oscillates back and forth instead of restricting access until the size drops. We may eventually pursue alternative solutions in `Advanced Pooling Behaviors <#advanced-pooling-behaviors>`__. - -Why is waitQueueTimeoutMS optional for some drivers? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -We are anticipating eventually introducing a single client-side timeout mechanism, making us hesitant to introduce another granular timeout control. Therefore, if a driver/language already has an idiomatic way to implement their timeouts, they should leverage that mechanism over implementing waitQueueTimeoutMS. - - -Backwards Compatibility -======================= - -As mentioned in `Deprecated Options <#deprecated-options>`__, some drivers currently implement the options ``waitQueueSize`` and/or ``waitQueueMultiple``. These options will need to be deprecated and phased out of the drivers that have implemented them.
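For illustration, the two errors defined in the Connection Pool Errors section above could be modeled in Go roughly as follows; the type names mirror the spec shapes but are not the Go driver's exported types.

.. code:: go

    package pool

    import "fmt"

    // PoolClosedError corresponds to the spec's PoolClosedError shape.
    type PoolClosedError struct {
        Address string
    }

    func (e PoolClosedError) Error() string {
        return fmt.Sprintf("attempted to check out a connection from closed connection pool %s", e.Address)
    }

    // WaitQueueTimeoutError corresponds to the spec's WaitQueueTimeoutError shape.
    type WaitQueueTimeoutError struct {
        Address string
    }

    func (e WaitQueueTimeoutError) Error() string {
        return fmt.Sprintf("timed out while checking out a connection from connection pool %s", e.Address)
    }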
- - -Reference Implementations -========================= - -- JAVA (JAVA-3079) -- RUBY (RUBY-1560) - -Future Development -================== - -SDAM -~~~~ - -This specification does not dictate how SDAM Monitoring connections are managed. SDAM specifies that “A monitor SHOULD NOT use the client's regular connection pool”. Some possible solutions for this include: - -- Having each Endpoint representation in the driver create and manage a separate dedicated Connection for monitoring purposes -- Having each Endpoint representation in the driver maintain a separate pool of maxPoolSize 1 for monitoring purposes. -- Having each Pool maintain a dedicated connection for monitoring purposes, with an API to expose that connection. - -Advanced Pooling Behaviors -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This spec does not address any advanced pooling behaviors like predictive pooling, aggressive connection creation, or handling high request volume. Future work may address this. - -Add support for OP_MSG exhaustAllowed -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Exhaust Cursors may require changes to how we close connections in the future, specifically to add a way to close and remove from its pool a connection which has unread exhaust messages. - - -Change log -========== - -:2019-06-06: Add "connectionError" as a valid reason for ConnectionCheckOutFailedEvent diff --git a/testdata/connection-string/README.rst b/testdata/connection-string/README.rst deleted file mode 100644 index f221600b2d..0000000000 --- a/testdata/connection-string/README.rst +++ /dev/null @@ -1,73 +0,0 @@ -======================= -Connection String Tests -======================= - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the Connection String Spec. - -As the spec is primarily concerned with parsing the parts of a URI, these tests -do not focus on host and option validation. Where necessary, the tests use -options known to be (un)supported by drivers to assert behavior such as issuing -a warning on repeated option keys. As such these YAML tests are in no way a -replacement for more thorough testing. However, they can provide an initial -verification of your implementation. - -Version -------- - -Files in the "specifications" repository have no version scheme. They are not -tied to a MongoDB server version. - -Format ------- - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``uri``: A string containing the URI to be parsed. -- ``valid:`` A boolean indicating if the URI should be considered valid. -- ``warning:`` A boolean indicating whether URI parsing should emit a warning - (independent of whether or not the URI is valid). -- ``hosts``: An array of host objects, each of which have the following keys: - - - ``type``: A string denoting the type of host. Possible values are "ipv4", - "ip_literal", "hostname", and "unix". Asserting the type is *optional*. - - ``host``: A string containing the parsed host. - - ``port``: An integer containing the parsed port number. -- ``auth``: An object containing the following keys: - - - ``username``: A string containing the parsed username. For auth mechanisms - that do not utilize a password, this may be the entire ``userinfo`` token - (as discussed in `RFC 2396 `_). - - ``password``: A string containing the parsed password. 
- - ``db``: A string containing the parsed authentication database. For legacy - implementations that support namespaces (databases and collections) this may - be the full namespace eg: ``.`` -- ``options``: An object containing key/value pairs for each parsed query string - option. - -If a test case includes a null value for one of these keys (e.g. ``auth: ~``, -``port: ~``), no assertion is necessary. This both simplifies parsing of the -test files (keys should always exist) and allows flexibility for drivers that -might substitute default values *during* parsing (e.g. omitted ``port`` could be -parsed as 27017). - -The ``valid`` and ``warning`` fields are boolean in order to keep the tests -flexible. We are not concerned with asserting the format of specific error or -warnings messages strings. - -Use as unit tests -================= - -Testing whether a URI is valid or not should simply be a matter of checking -whether URI parsing (or MongoClient construction) raises an error or exception. -Testing for emitted warnings may require more legwork (e.g. configuring a log -handler and watching for output). - -Not all drivers may be able to directly assert the hosts, auth credentials, and -options. Doing so may require exposing the driver's URI parsing component. - -The file valid-db-with-dotted-name.yml is a special case for testing drivers -that allow dotted namespaces, instead of only database names, in the Auth -Database portion of the URI. diff --git a/testdata/convenient-transactions/README.rst b/testdata/convenient-transactions/README.rst deleted file mode 100644 index eabf244a6e..0000000000 --- a/testdata/convenient-transactions/README.rst +++ /dev/null @@ -1,220 +0,0 @@ -===================================== -Convenient API for Transactions Tests -===================================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in this directory are platform-independent tests that -drivers can use to prove their conformance to the Convenient API for -Transactions spec. They are designed with the intention of sharing some -test-runner code with the CRUD, Command Monitoring, and Transaction spec tests. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Server Fail Point -================= - -See: `Server Fail Point <../../transactions/tests#server-fail-point>`_ in the -Transactions spec test suite. - -Test Format -=========== - -Each YAML file has the following keys: - -- ``runOn`` (optional): An array of server version and/or topology requirements - for which the tests can be run. If the test environment satisfies one or more - of these requirements, the tests may be executed; otherwise, this file should - be skipped. If this field is omitted, the tests can be assumed to have no - particular requirements and should be executed. Each element will have some or - all of the following fields: - - - ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the tests. If this field is omitted, it should - be assumed that there is no lower bound on the required server version. - - - ``maxServerVersion`` (optional): The maximum server version (inclusive) - against which the tests can be run successfully. If this field is omitted, - it should be assumed that there is no upper bound on the required server - version. 
- - - ``topology`` (optional): An array of server topologies against which the - tests can be run successfully. Valid topologies are "single", "replicaset", - and "sharded". If this field is omitted, the default is all topologies (i.e. - ``["single", "replicaset", "sharded"]``). - -- ``database_name`` and ``collection_name``: The database and collection to use - for testing. - -- ``data``: The data that should exist in the collection under test before each - test run. - -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some or all of the following fields: - - - ``description``: The name of the test. - - - ``skipReason`` (optional): If present, the test should be skipped and the - string value will specify a reason. - - - ``failPoint`` (optional): The ``configureFailPoint`` command document to run - to configure a fail point on the primary server. This option and - ``useMultipleMongoses: true`` are mutually exclusive. - - - ``useMultipleMongoses`` (optional): If ``true``, the MongoClient for this - test should be initialized with multiple mongos seed addresses. If ``false`` - or omitted, only a single mongos address should be specified. This field has - no effect for non-sharded topologies. - - - ``clientOptions`` (optional): Names and values of options to pass to - ``MongoClient()``. - - - ``sessionOptions`` (optional): Names and values of options to pass to - ``MongoClient.startSession()``. - - - ``operations``: Array of documents, each describing an operation to be - executed. Each document has the following fields: - - - ``name``: The name of the operation on ``object``. - - - ``object``: The name of the object on which to perform the operation. The - value will be either "collection" or "session0". - - - ``arguments`` (optional): Names and values of arguments to pass to the - operation. - - - ``result`` (optional): The return value from the operation. This will - correspond to an operation's result object as defined in the CRUD - specification. If the operation is expected to return an error, the - ``result`` is a single document that has one or more of the following - fields: - - - ``errorContains``: A substring of the expected error message. - - - ``errorCodeName``: The expected "codeName" field in the server - error response. - - - ``errorLabelsContain``: A list of error label strings that the - error is expected to have. - - - ``errorLabelsOmit``: A list of error label strings that the - error is expected not to have. - - - ``expectations`` (optional): List of command-started events. - - - ``outcome``: Document describing the return value and/or expected state of - the collection after the operation is executed. Contains the following - fields: - - - ``collection``: - - - ``data``: The data that should exist in the collection after the - operations have run. - -``withTransaction`` Operation -````````````````````````````` - -These tests introduce a ``withTransaction`` operation, which may have the -following fields: - -- ``callback``: Document containing the following field: - - - ``operations``: Array of documents, each describing an operation to be - executed. Elements in this array will follow the same structure as the - ``operations`` field defined above (and in the CRUD and Transactions specs). - - Note that drivers are expected to evaluate ``error`` and ``result`` - assertions when executing operations within ``callback.operations``. 
- -- ``options`` (optional): Names and values of options to pass to - ``withTransaction()``, which will in turn be used for ``startTransaction()``. - -Use as Integration Tests -======================== - -Testing against a replica set will require server version 4.0.0 or later. The -replica set should include a primary, a secondary, and an arbiter. Including a -secondary ensures that server selection in a transaction works properly. -Including an arbiter helps ensure that no new bugs have been introduced related -to arbiters. - -Testing against a sharded cluster will require server version 4.1.6 or later. -A driver that implements support for sharded transactions MUST also run these -tests against a MongoDB sharded cluster with multiple mongoses. Including -multiple mongoses (and initializing the MongoClient with multiple mongos seeds!) -ensures that mongos transaction pinning works properly. - -See: `Use as Integration Tests <../../transactions/tests#use-as-integration-tests>`_ -in the Transactions spec test suite for instructions on executing each test. - -Take note of the following: - -- Most tests will consist of a single "withTransaction" operation to be invoked - on the "session0" object. The ``callback`` argument of that operation will - resemble the ``operations`` array found in transaction spec tests. - -Command-Started Events -`````````````````````` - -See: `Command-Started Events <../../transactions/tests#command-started-events>`_ -in the Transactions spec test suite for instructions on asserting -command-started events. - -Prose Tests -=========== - -Callback Raises a Custom Error -`````````````````````````````` - -Write a callback that raises a custom exception or error that does not include -either UnknownTransactionCommitResult or TransientTransactionError error labels. -Execute this callback using ``withTransaction`` and assert that the callback's -error bypasses any retry logic within ``withTransaction`` and is propagated to -the caller of ``withTransaction``. - -Callback Returns a Value -```````````````````````` - -Write a callback that returns a custom value (e.g. boolean, string, object). -Execute this callback using ``withTransaction`` and assert that the callback's -return value is propagated to the caller of ``withTransaction``. - -Retry Timeout is Enforced -````````````````````````` - -Drivers should test that ``withTransaction`` enforces a non-configurable timeout -before retrying both commits and entire transactions. Specifically, three cases -should be checked: - - * If the callback raises an error with the TransientTransactionError label and - the retry timeout has been exceeded, ``withTransaction`` should propagate the - error to its caller. - * If committing raises an error with the UnknownTransactionCommitResult label, - the error is not a write concern timeout, and the retry timeout has been - exceeded, ``withTransaction`` should propagate the error to its caller. - * If committing raises an error with the TransientTransactionError label and - the retry timeout has been exceeded, ``withTransaction`` should propagate the - error to its caller. This case may occur if the commit was internally retried - against a new primary after a failover and the second primary returned a - NoSuchTransaction error response. - - If possible, drivers should implement these tests without requiring the test - runner to block for the full duration of the retry timeout. 
This might be done - by internally modifying the timeout value used by ``withTransaction`` with some - private API or using a mock timer. - -Changelog -========= - -:2019-03-01: Add top-level ``runOn`` field to denote server version and/or - topology requirements for the test file. Removes the - ``minServerVersion`` top-level field, which is now expressed within - ``runOn`` elements. - - Add test-level ``useMultipleMongoses`` field. diff --git a/testdata/crud/README.rst b/testdata/crud/README.rst deleted file mode 100644 index 6fd4626c06..0000000000 --- a/testdata/crud/README.rst +++ /dev/null @@ -1,276 +0,0 @@ -========== -CRUD Tests -========== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the CRUD spec. - -Running these integration tests will require a running MongoDB server or -cluster with server versions 2.6.0 or later. Some tests have specific server -version requirements as noted by the ``runOn`` section, if provided. - -Subdirectories for Test Formats -------------------------------- - -This document describes a legacy format for CRUD tests: legacy-v1, which dates back -to the first version of the CRUD specification. New CRUD tests should be written -in the `unified test format <../../../../unified-test-format/unified-test-format.rst>`_ -and placed under ``unified/``. Until such time that all original tests have been ported -to the unified test format, tests in each format will be grouped in their own subdirectory: - -- ``v1/``: Legacy-v1 format tests -- ``unified/``: Tests using the `unified test format <../../../../unified-test-format/unified-test-format.rst>`_ - -Since some drivers may not have a unified test runner capable of executing tests -in both formats, segregating tests in this manner will make it easier for -drivers to sync and feed test files to different test runners. - -Legacy-v1 Test Format for Single Operations -------------------------------------------- - -*Note: this section pertains to test files in the "v1" directory.* - -The test format above supports both multiple operations and APM expectations, -and is consistent with the formats used by other specifications. Previously, the -CRUD spec tests used a simplified format that only allowed for executing a -single operation. Notable differences from the legacy-v2 format are as follows: - -- Instead of a ``tests[i].operations`` array, a single operation was defined as - a document in ``tests[i].operation``. That document consisted of only the - ``name``, ``arguments``, and an optional ``object`` field. - -- Instead of ``error`` and ``result`` fields within each element in the - ``tests[i].operations`` array, the single operation's error and result were - defined under the ``tests[i].outcome.error`` and ``tests[i].outcome.result`` - fields. - -- Instead of a top-level ``runOn`` field, server requirements are denoted by - separate top-level ``minServerVersion`` and ``maxServerVersion`` fields. The - minimum server version is an inclusive lower bound for running the test. The - maximum server version is an exclusive upper bound for running the test. If a - field is not present, it should be assumed that there is no corresponding bound - on the required server version. - -The legacy-v1 format should not conflict with the newer, multi-operation format -used by other specs (e.g. Transactions).
It is possible to create a unified test -runner capable of executing both legacy formats (as some drivers do). - -Error Assertions for Bulk Write Operations -========================================== - -When asserting errors (e.g. ``errorContains``, ``errorCodeName``) for bulk write -operations, the test harness should inspect the ``writeConcernError`` and/or -``writeErrors`` properties of the bulk write exception. This may not be needed for -``errorContains`` if a driver concatenates all write and write concern error -messages into the bulk write exception's top-level message. - -Test Runner Implementation -========================== - -This section provides guidance for implementing a test runner for legacy-v1 -tests. See the `unified test format spec <../../../../unified-test-format/unified-test-format.rst>`_ for how to run tests under -``unified/``. - -Before running the tests: - -- Create a global MongoClient (``globalMongoClient``) and connect to the server. - This client will be used for executing meta operations, such as checking - server versions and preparing data fixtures. - -For each test file: - -- Using ``globalMongoClient``, check that the current server version satisfies - one of the configurations provided in the top-level ``runOn`` field in the test - file (if applicable). If the - requirements are not satisfied, the test file should be skipped. - -- Determine the collection and database under test, utilizing the top-level - ``collection_name`` and/or ``database_name`` fields if present. - -- For each element in the ``tests`` array: - - - Using ``globalMongoClient``, ensure that the collection and/or database - under test is in a "clean" state, as needed. This may be accomplished by - dropping the database; however, drivers may also decide to drop individual - collections as needed (this may be more performant). - - - If the top-level ``data`` field is present in the test file, insert the - corresponding data into the collection under test using - ``globalMongoClient``. - - - If the ``failPoint`` field is present, use ``globalMongoClient`` to - configure the fail point on the primary server. See - `Server Fail Point <../../transactions/tests#server-fail-point>`_ in the - Transactions spec test documentation for more information. - - - Create a local MongoClient (``localMongoClient``) and connect to the server. - This client will be used for executing the test case. - - - If ``clientOptions`` is present, those options should be used to create - the client. Drivers MAY merge these options atop existing defaults (e.g. - reduced ``serverSelectionTimeoutMS`` value for faster test failures) at - their own discretion. - - - Activate command monitoring for ``localMongoClient`` and begin capturing - events. Note that some events may need to be filtered out if the driver - uses global listeners or reports internal commands (e.g. ``hello``, legacy - hello, authentication). - - - For each element in the ``operations`` array: - - - Using ``localMongoClient``, select the appropriate ``object`` to execute - the operation. Default to the collection under test if this field is not - present. - - - If ``collectionOptions`` is present, those options should be used to - construct the collection object. - - - Given the ``name`` and ``arguments``, execute the operation on the object - under test. Capture the result of the operation, if any, and observe - whether an error occurred. If an error is encountered that includes a - result (e.g. BulkWriteException), extract the result object.
- - - If ``error`` is present and true, assert that the operation encountered an - error. Otherwise, assert that no error was encountered. - - - if ``result`` is present, assert that it matches the operation's result. - - - Deactivate command monitoring for ``localMongoClient``. - - - If the ``expectations`` array is present, assert that the sequence of - emitted CommandStartedEvents from executing the operation(s) matches the - sequence of ``command_started_event`` objects in the ``expectations`` array. - - - If the ``outcome`` field is present, assert the contents of the specified - collection using ``globalMongoClient``. - Note the server does not guarantee that documents returned by a find - command will be in inserted order. This find MUST sort by ``{_id:1}``. - -Evaluating Matches ------------------- - -The expected values for results (e.g. ``result`` for an operation, -``command_started_event.command``, elements in ``outcome.data``) are -written in `Extended JSON <../../extended-json.rst>`_. Drivers may adopt any of -the following approaches to comparisons, as long as they are consistent: - -- Convert ``actual`` to Extended JSON and compare to ``expected`` -- Convert ``expected`` and ``actual`` to BSON, and compare them -- Convert ``expected`` and ``actual`` to native representations, and compare - them - -Extra Fields in Actual Documents -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When comparing ``actual`` and ``expected`` *documents*, drivers should permit -``actual`` documents to contain additional fields not present in ``expected``. -For example, the following documents match: - -- ``expected`` is ``{ "x": 1 }`` -- ``actual`` is ``{ "_id": { "$oid" : "000000000000000000000001" }, "x": 1 }`` - -In this sense, ``expected`` may be a subset of ``actual``. It may also be -helpful to think of ``expected`` as a form of query criteria. The intention -behind this rule is that it is not always feasible for the test to express all -fields in the expected document(s) (e.g. session and cluster time information -in a ``command_started_event.command`` document). - -This rule for allowing extra fields in ``actual`` only applies for values that -correspond to a document. For instance, an actual result of ``[1, 2, 3, 4]`` for -a ``distinct`` operation would not match an expected result of ``[1, 2, 3]``. -Likewise with the ``find`` operation, this rule would only apply when matching -documents *within* the expected result array and actual cursor. - -Note that in the case of result objects for some CRUD operations, ``expected`` -may condition additional, optional fields (see: -`Optional Fields in Expected Result Objects`_). - -Fields that must NOT be present in Actual Documents -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Some command-started events in ``expectations`` include ``null`` values for -optional fields such as ``allowDiskUse``. -Tests MUST assert that the actual command **omits** any field that has a -``null`` value in the expected command. - -Optional Fields in Expected Result Objects -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Some ``expected`` results may include fields that are optional in the CRUD -specification, such as ``insertedId`` (for InsertOneResult), ``insertedIds`` -(for InsertManyResult), and ``upsertedCount`` (for UpdateResult). Drivers that -do not implement these fields should ignore them when comparing ``actual`` with -``expected``. - -Prose Tests -=========== - -The following tests have not yet been automated, but MUST still be tested. - -1.
WriteConcernError.details exposes writeConcernError.errInfo --------------------------------------------------------------- - -Test that ``writeConcernError.errInfo`` in a command response is propagated as -``WriteConcernError.details`` (or equivalent) in the driver. - -Using a 4.0+ server, set the following failpoint: - -.. code:: javascript - - { - "configureFailPoint": "failCommand", - "data": { - "failCommands": ["insert"], - "writeConcernError": { - "code": 100, - "codeName": "UnsatisfiableWriteConcern", - "errmsg": "Not enough data-bearing nodes", - "errInfo": { - "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } - } - } - }, - "mode": { "times": 1 } - } - -Then, perform an insert operation and assert that a WriteConcernError occurs and -that its ``details`` property is both accessible and matches the ``errInfo`` -object from the failpoint. - -2. WriteError.details exposes writeErrors[].errInfo ---------------------------------------------------- - -Test that ``writeErrors[].errInfo`` in a command response is propagated as -``WriteError.details`` (or equivalent) in the driver. - -Using a 5.0+ server, create a collection with -`document validation `_ -like so: - -.. code:: javascript - - { - "create": "test", - "validator": { - "x": { $type: "string" } - } - } - -Enable `command monitoring <../../command-monitoring/command-monitoring.rst>`_ -to observe CommandSucceededEvents. Then, insert an invalid document (e.g. -``{x: 1}``) and assert that a WriteError occurs, that its code is ``121`` -(i.e. DocumentValidationFailure), and that its ``details`` property is -accessible. Additionally, assert that a CommandSucceededEvent was observed and -that the ``writeErrors[0].errInfo`` field in the response document matches the -WriteError's ``details`` property. 
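For illustration, the assertion in the second prose test might look like the following in the Go driver. This is only a hedged sketch, not part of the spec text: it assumes the v1 import paths (``go.mongodb.org/mongo-driver``), that the ``test`` collection was created with the validator shown above, and that the driver surfaces the server's ``errInfo`` via ``WriteError.Details``.

.. code:: go

    package main

    import (
        "context"
        "errors"
        "fmt"
        "log"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/mongo"
        "go.mongodb.org/mongo-driver/mongo/options"
    )

    func main() {
        ctx := context.Background()

        // Assumed local test deployment; adjust the URI for your environment.
        client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
        if err != nil {
            log.Fatal(err)
        }
        defer func() { _ = client.Disconnect(ctx) }()

        // Assumes "test" was created with the {"x": {"$type": "string"}} validator above.
        coll := client.Database("prose-tests").Collection("test")

        // Insert an invalid document so the server reports DocumentValidationFailure (121).
        _, err = coll.InsertOne(ctx, bson.D{{Key: "x", Value: 1}})

        var wErr mongo.WriteException
        if !errors.As(err, &wErr) || len(wErr.WriteErrors) == 0 {
            log.Fatalf("expected a write error, got: %v", err)
        }
        if code := wErr.WriteErrors[0].Code; code != 121 {
            log.Fatalf("expected error code 121, got %d", code)
        }

        // Details carries the raw errInfo document; compare it against
        // writeErrors[0].errInfo from the captured CommandSucceededEvent.
        fmt.Println("errInfo:", wErr.WriteErrors[0].Details)
    }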
diff --git a/testdata/crud/unified/distinct-hint.json b/testdata/crud/unified/distinct-hint.json new file mode 100644 index 0000000000..2a6869cbe0 --- /dev/null +++ b/testdata/crud/unified/distinct-hint.json @@ -0,0 +1,139 @@ +{ + "description": "distinct-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "7.1.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "distinct-hint-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "distinct-hint-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "distinct with hint string", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": { + "_id": 1 + }, + "hint": "_id_" + }, + "commandName": "distinct", + "databaseName": "distinct-hint-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with hint document", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "commandName": "distinct", + "databaseName": "distinct-hint-tests" + } + } + ] + } + ] + } + ] +} diff --git a/testdata/crud/unified/distinct-hint.yml b/testdata/crud/unified/distinct-hint.yml new file mode 100644 index 0000000000..9d277616d3 --- /dev/null +++ b/testdata/crud/unified/distinct-hint.yml @@ -0,0 +1,73 @@ +description: "distinct-hint" + +schemaVersion: "1.0" +runOnRequirements: + # https://jira.mongodb.org/browse/SERVER-14227 + # Server supports distinct with hint starting from 7.1.0. 
+ - minServerVersion: "7.1.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name distinct-hint-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +tests: + - description: "distinct with hint string" + operations: + - name: distinct + object: *collection0 + arguments: + fieldName: &fieldName x + filter: &filter { _id: 1 } + hint: _id_ + expectResult: [ 11 ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + distinct: *collection0Name + key: *fieldName + query: *filter + hint: _id_ + commandName: distinct + databaseName: *database0Name + + - description: "distinct with hint document" + operations: + - name: distinct + object: *collection0 + arguments: + fieldName: *fieldName + filter: *filter + hint: + _id: 1 + expectResult: [ 11 ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + distinct: *collection0Name + key: *fieldName + query: *filter + hint: + _id: 1 + commandName: distinct + databaseName: *database0Name diff --git a/testdata/index-management/createSearchIndex.json b/testdata/index-management/createSearchIndex.json index 327cb61259..f4f2a6c661 100644 --- a/testdata/index-management/createSearchIndex.json +++ b/testdata/index-management/createSearchIndex.json @@ -28,7 +28,17 @@ ], "runOnRequirements": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "7.0.5", + "maxServerVersion": "7.0.99", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + }, + { + "minServerVersion": "7.2.0", "topologies": [ "replicaset", "load-balanced", diff --git a/testdata/index-management/createSearchIndex.yml b/testdata/index-management/createSearchIndex.yml index a32546cacf..8d05ee5042 100644 --- a/testdata/index-management/createSearchIndex.yml +++ b/testdata/index-management/createSearchIndex.yml @@ -16,7 +16,13 @@ createEntities: collectionName: *collection0 runOnRequirements: - - minServerVersion: "7.0.0" + # Skip server versions without fix of SERVER-83107 to avoid error message "BSON field 'createSearchIndexes.indexes.type' is an unknown field." + # SERVER-83107 was not backported to 7.1. 
+ - minServerVersion: "7.0.5" + maxServerVersion: "7.0.99" + topologies: [ replicaset, load-balanced, sharded ] + serverless: forbid + - minServerVersion: "7.2.0" topologies: [ replicaset, load-balanced, sharded ] serverless: forbid diff --git a/testdata/index-management/createSearchIndexes.json b/testdata/index-management/createSearchIndexes.json index d91d7d9cf3..01300b1b7f 100644 --- a/testdata/index-management/createSearchIndexes.json +++ b/testdata/index-management/createSearchIndexes.json @@ -28,7 +28,17 @@ ], "runOnRequirements": [ { - "minServerVersion": "7.0.0", + "minServerVersion": "7.0.5", + "maxServerVersion": "7.0.99", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + }, + { + "minServerVersion": "7.2.0", "topologies": [ "replicaset", "load-balanced", diff --git a/testdata/index-management/createSearchIndexes.yml b/testdata/index-management/createSearchIndexes.yml index cac442cb87..56ee5ff208 100644 --- a/testdata/index-management/createSearchIndexes.yml +++ b/testdata/index-management/createSearchIndexes.yml @@ -16,7 +16,13 @@ createEntities: collectionName: *collection0 runOnRequirements: - - minServerVersion: "7.0.0" + # Skip server versions without fix of SERVER-83107 to avoid error message "BSON field 'createSearchIndexes.indexes.type' is an unknown field." + # SERVER-83107 was not backported to 7.1. + - minServerVersion: "7.0.5" + maxServerVersion: "7.0.99" + topologies: [ replicaset, load-balanced, sharded ] + serverless: forbid + - minServerVersion: "7.2.0" topologies: [ replicaset, load-balanced, sharded ] serverless: forbid diff --git a/testdata/initial-dns-seedlist-discovery/README.rst b/testdata/initial-dns-seedlist-discovery/README.rst deleted file mode 100644 index 0e6404aa5b..0000000000 --- a/testdata/initial-dns-seedlist-discovery/README.rst +++ /dev/null @@ -1,135 +0,0 @@ -==================================== -Initial DNS Seedlist Discovery tests -==================================== - -This directory contains platform-independent tests that drivers can use -to prove their conformance to the Initial DNS Seedlist Discovery spec. - -Test Setup ----------- - -The tests in the ``replica-set`` directory MUST be executed against a -three-node replica set on localhost ports 27017, 27018, and 27019 with -replica set name ``repl0``. - -The tests in the ``load-balanced`` directory MUST be executed against a -load-balanced sharded cluster with the mongos servers running on localhost ports -27017 and 27018 (corresponding to the script in `drivers-evergreen-tools`_). The -load balancers, shard servers, and config servers may run on any open ports. - -.. _`drivers-evergreen-tools`: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/run-load-balancer.sh - -The tests in the ``sharded`` directory MUST be executed against a sharded -cluster with the mongos servers running on localhost ports 27017 and 27018. -Shard servers and config servers may run on any open ports. - -In all cases, the clusters MUST be started with SSL enabled. - -To run the tests that accompany this spec, you need to configure the SRV and -TXT records with a real name server. The following records are required for -these tests:: - - Record TTL Class Address - localhost.test.build.10gen.cc. 86400 IN A 127.0.0.1 - localhost.sub.test.build.10gen.cc. 86400 IN A 127.0.0.1 - - Record TTL Class Port Target - _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test1.test.build.10gen.cc. 
86400 IN SRV 27018 localhost.test.build.10gen.cc. - _mongodb._tcp.test2.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. - _mongodb._tcp.test2.test.build.10gen.cc. 86400 IN SRV 27019 localhost.test.build.10gen.cc. - _mongodb._tcp.test3.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test5.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test6.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test7.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test8.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test10.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test11.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test12.test.build.10gen.cc. 86400 IN SRV 27017 localhost.build.10gen.cc. - _mongodb._tcp.test13.test.build.10gen.cc. 86400 IN SRV 27017 test.build.10gen.cc. - _mongodb._tcp.test14.test.build.10gen.cc. 86400 IN SRV 27017 localhost.not-test.build.10gen.cc. - _mongodb._tcp.test15.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.not-build.10gen.cc. - _mongodb._tcp.test16.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.not-10gen.cc. - _mongodb._tcp.test17.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.not-cc. - _mongodb._tcp.test18.test.build.10gen.cc. 86400 IN SRV 27017 localhost.sub.test.build.10gen.cc. - _mongodb._tcp.test19.test.build.10gen.cc. 86400 IN SRV 27017 localhost.evil.build.10gen.cc. - _mongodb._tcp.test19.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test20.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test21.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _customname._tcp.test22.test.build.10gen.cc 86400 IN SRV 27017 localhost.test.build.10gen.cc - - Record TTL Class Text - test5.test.build.10gen.cc. 86400 IN TXT "replicaSet=repl0&authSource=thisDB" - test6.test.build.10gen.cc. 86400 IN TXT "replicaSet=repl0" - test6.test.build.10gen.cc. 86400 IN TXT "authSource=otherDB" - test7.test.build.10gen.cc. 86400 IN TXT "ssl=false" - test8.test.build.10gen.cc. 86400 IN TXT "authSource" - test10.test.build.10gen.cc. 86400 IN TXT "socketTimeoutMS=500" - test11.test.build.10gen.cc. 86400 IN TXT "replicaS" "et=rep" "l0" - test20.test.build.10gen.cc. 86400 IN TXT "loadBalanced=true" - test21.test.build.10gen.cc. 86400 IN TXT "loadBalanced=false" - -Note that ``test4`` is omitted deliberately to test what happens with no SRV -record. ``test9`` is missing because it was deleted during the development of -the tests. The missing ``test.`` sub-domain in the SRV record target for -``test12`` is deliberate. ``test22`` is used to test a custom service name -(``customname``). - -In our tests we have used ``localhost.test.build.10gen.cc`` as the domain, and -then configured ``localhost.test.build.10gen.cc`` to resolve to 127.0.0.1. - -You need to adapt the records shown above to replace ``test.build.10gen.cc`` -with your own domain name, and update the "uri" field in the YAML or JSON files -in this directory with the actual domain. 
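Before running the tests, it can help to confirm that the records resolve as intended. The snippet below is a hypothetical sanity check (not part of the test format) using Go's standard resolver; the ``test5`` name and the expected TXT value are taken from the record table above.

.. code:: go

    package main

    import (
        "fmt"
        "log"
        "net"
    )

    func main() {
        // _mongodb._tcp.test5.test.build.10gen.cc. should resolve to a single SRV
        // record targeting localhost.test.build.10gen.cc. on port 27017.
        _, srvs, err := net.LookupSRV("mongodb", "tcp", "test5.test.build.10gen.cc")
        if err != nil {
            log.Fatalf("SRV lookup failed: %v", err)
        }
        for _, srv := range srvs {
            fmt.Printf("SRV target=%s port=%d\n", srv.Target, srv.Port)
        }

        // test5 also carries default URI options in a TXT record
        // (expected: "replicaSet=repl0&authSource=thisDB").
        txt, err := net.LookupTXT("test5.test.build.10gen.cc")
        if err != nil {
            log.Fatalf("TXT lookup failed: %v", err)
        }
        fmt.Println("TXT:", txt)
    }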
- -Test Format and Use -------------------- - -These YAML and JSON files contain the following fields: - -- ``uri``: a ``mongodb+srv`` connection string -- ``seeds``: the expected set of initial seeds discovered from the SRV record -- ``numSeeds``: the expected number of initial seeds discovered from the SRV - record. This is mainly used to test ``srvMaxHosts``, since randomly selected - hosts cannot be deterministically asserted. -- ``hosts``: the discovered topology's list of hosts once SDAM completes a scan -- ``numHosts``: the expected number of hosts discovered once SDAM completes a - scan. This is mainly used to test ``srvMaxHosts``, since randomly selected - hosts cannot be deterministically asserted. -- ``options``: the parsed `URI options`_ as discovered from the - `Connection String`_'s "Connection Options" component and SRV resolution - (e.g. TXT records, implicit ``tls`` default). -- ``parsed_options``: additional, parsed options from other `Connection String`_ - components. This is mainly used for asserting ``UserInfo`` (as ``user`` and - ``password``) and ``Auth database`` (as ``auth_database``). -- ``error``: indicates that the parsing of the URI, or the resolving or - contents of the SRV or TXT records included errors. -- ``comment``: a comment to indicate why a test would fail. - -.. _`Connection String`: ../../connection-string/connection-string-spec.rst -.. _`URI options`: ../../uri-options/uri-options.rst - -For each file, create a MongoClient initialized with the ``mongodb+srv`` -connection string. - -If ``seeds`` is specified, drivers SHOULD verify that the set of hosts in the -client's initial seedlist matches the list in ``seeds``. If ``numSeeds`` is -specified, drivers SHOULD verify that the size of that set matches ``numSeeds``. - -If ``hosts`` is specified, drivers MUST verify that the set of -ServerDescriptions in the client's TopologyDescription eventually matches the -list in ``hosts``. If ``numHosts`` is specified, drivers MUST verify that the -size of that set matches ``numHosts``. - -If ``options`` is specified, drivers MUST verify each of the values under -``options`` match the MongoClient's parsed value for that option. There may be -other options parsed by the MongoClient as well, which a test does not verify. - -If ``parsed_options`` is specified, drivers MUST verify that each of the values -under ``parsed_options`` match the MongoClient's parsed value for that option. -Supported values include, but are not limited to, ``user`` and ``password`` -(parsed from ``UserInfo``) and ``auth_database`` (parsed from -``Auth database``). - -If ``error`` is specified and ``true``, drivers MUST verify that an error has -been thrown. diff --git a/testdata/kmip-certs/README.md b/testdata/kmip-certs/README.md deleted file mode 100644 index 383c0228d4..0000000000 --- a/testdata/kmip-certs/README.md +++ /dev/null @@ -1,3 +0,0 @@ -These Elliptic Curve (EC) certificates were generated by running `etc/gen-ec-certs/gen-ec-certs.sh`. -The EC certificates are used for testing the Go driver with PyKMIP. -PyKMIP does not support Golang's default TLS cipher suites with RSA. diff --git a/testdata/load-balancers/README.rst b/testdata/load-balancers/README.rst deleted file mode 100644 index 3975e7b0b7..0000000000 --- a/testdata/load-balancers/README.rst +++ /dev/null @@ -1,68 +0,0 @@ -=========================== -Load Balancer Support Tests -=========================== - -.. 
contents:: - ----- - -Introduction -============ - -This document describes how drivers should create load balanced clusters for -testing and how tests should be executed for such clusters. - -Testing Requirements -==================== - -For each server version that supports load balanced clusters, drivers MUST -add two Evergreen tasks: one with a sharded cluster with both authentication -and TLS enabled and one with a sharded cluster with authentication and TLS -disabled. In each task, the sharded cluster MUST be configured with two -mongos nodes running on localhost ports 27017 and 27018. The shard and config -servers may run on any free ports. Each task MUST also start up two TCP load -balancers operating in round-robin mode: one fronting both mongos servers and -one fronting a single mongos. - -Load Balancer Configuration ---------------------------- - -Drivers MUST use the ``run-load-balancer.sh`` script in -``drivers-evergreen-tools`` to start the TCP load balancers for Evergreen -tasks. This script MUST be run after the backing sharded cluster has already -been started. The script writes the URIs of the load balancers to a YAML -expansions file, which can be read by drivers via the ``expansions.update`` -Evergreen command. This will store the URIs into the ``SINGLE_MONGOS_LB_URI`` -and ``MULTI_MONGOS_LB_URI`` environment variables. - -Test Runner Configuration -------------------------- - -If the backing sharded cluster is configured with TLS enabled, drivers MUST -add the relevant TLS options to both ``SINGLE_MONGOS_LB_URI`` and -``MULTI_MONGOS_LB_URI`` to ensure that test clients can connect to the -cluster. Drivers MUST use the final URI stored in ``SINGLE_MONGOS_LB_URI`` -(with additional TLS options if required) to configure internal clients for -test runners (e.g. the internal MongoClient described by the `Unified Test -Format spec <../../unified-test-format/unified-test-format.rst>`__). - -In addition to modifying load balancer URIs, drivers MUST also mock server -support for returning a ``serviceId`` field in ``hello`` or legacy ``hello`` -command responses when running tests against a load-balanced cluster. This -can be done by using the value of ``topologyVersion.processId`` to set -``serviceId``. This MUST be done for all connections established by the test -runner, including those made by any internal clients. - -Tests -====== - -The YAML and JSON files in this directory contain platform-independent tests -written in the `Unified Test Format -<../../unified-test-format/unified-test-format.rst>`_. Drivers MUST run the -following test suites against a load balanced cluster: - -#. All test suites written in the Unified Test Format -#. Retryable Reads -#. Retryable Writes -#. Change Streams -#. Initial DNS Seedlist Discovery diff --git a/testdata/max-staleness/README.rst b/testdata/max-staleness/README.rst deleted file mode 100644 index 9cf945c85e..0000000000 --- a/testdata/max-staleness/README.rst +++ /dev/null @@ -1,72 +0,0 @@ -=================== -Max Staleness Tests -=================== - -This directory contains platform-independent tests that drivers can use -to prove their conformance to the Max Staleness Spec. The tests -are provided in both YAML and JSON formats, and drivers may test against -whichever format is more convenient for them. 
- -Test Format and Use -------------------- - -YAML files contain the following setup for each test: - -- ``heartbeatFrequencyMS``: optional int - -- ``topology_description``: the state of a mocked cluster - - - ``type``: the TopologyType - - - ``servers``: a list of ServerDescriptions, each with: - - - ``address``: a "host:port" - - - ``type``: a ServerType - - - ``avg_rtt_ms``: average round trip time in milliseconds [1]_ - - - ``lastWrite``: subdocument - - - ``lastWriteDate``: nonzero int64, milliseconds since some past time - - - ``maxWireVersion``: an int - - - ``lastUpdateTime``: milliseconds since the Unix epoch - -- ``read_preference``: a read preference document - -For each test, create a MongoClient. -Configure it with the heartbeatFrequencyMS specified by the test, -or accept the driver's default heartbeatFrequencyMS if the test omits this field. - -(Single-threaded and multi-threaded clients now make heartbeatFrequencyMS configurable. -This is a change in Server Discovery and Monitoring to support maxStalenessSeconds. -Before, multi-threaded clients were allowed to make it configurable or not.) - -For each test, create a new TopologyDescription object initialized with the -values from ``topology_description``. Initialize ServerDescriptions from the -provided data. Create a ReadPreference object initialized with the values -from ``read_preference``. Select servers that match the ReadPreference. - -Each test specifies that it expects an error, or specifies two sets of servers: - -- ``error: true`` -- ``suitable_servers``: the set of servers in the TopologyDescription - that are suitable for the ReadPreference, without taking ``avg_rtt_ms`` - into account. -- ``in_latency_window``: the set of suitable servers whose round trip time - qualifies them according to the default latency threshold of 15ms. - In each test there is one server in the latency window, to ensure - tests pass or fail deterministically. - -If the file contains ``error: true``, drivers MUST test that they throw an -error during server selection due to an invalid read preference. For other -files, drivers MUST test that they correctly select the set of servers in -``in_latency_window``. - -Drivers MAY also test that before filtration by latency, they select the -specified set of "suitable" servers. - -.. [1] ``avg_rtt_ms`` is included merely for consistency with - Server Selection tests. It is not significant in Max Staleness tests. diff --git a/testdata/read-write-concern/README.rst b/testdata/read-write-concern/README.rst deleted file mode 100644 index df94273510..0000000000 --- a/testdata/read-write-concern/README.rst +++ /dev/null @@ -1,80 +0,0 @@ -======================= -Connection String Tests -======================= - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the Read and Write Concern -specification. - -Version -------- - -Files in the "specifications" repository have no version scheme. They are not -tied to a MongoDB server version. - -Format ------- - -Connection String -~~~~~~~~~~~~~~~~~ - -These tests are designed to exercise the connection string parsing related -to read concern and write concern. - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``uri``: A string containing the URI to be parsed. 
-- ``valid:``: a boolean indicating if parsing the uri should result in an error. -- ``writeConcern:`` A document indicating the expected write concern. -- ``readConcern:`` A document indicating the expected read concern. - -If a test case includes a null value for one of these keys, or if the key is missing, -no assertion is necessary. This both simplifies parsing of the test files and allows flexibility -for drivers that might substitute default values *during* parsing. - -Document -~~~~~~~~ - -These tests are designed to ensure compliance with the spec in relation to what should be -sent to the server. - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``valid:``: a boolean indicating if the write concern created from the document is valid. -- ``writeConcern:`` A document indicating the write concern to use. -- ``writeConcernDocument:`` A document indicating the write concern to be sent to the server. -- ``readConcern:`` A document indicating the read concern to use. -- ``readConcernDocument:`` A document indicating the read concern to be sent to the server. -- ``isServerDefault:`` Indicates whether the read or write concern is considered the server's default. -- ``isAcknowledged:`` Indicates if the write concern should be considered acknowledged. - -Operation -~~~~~~~~~ - -These tests check that the default write concern is omitted in operations. - -The spec test format is an extension of `transactions spec tests `_ with the following additions: - -- ``writeConcern`` in the ``databaseOptions`` or ``collectionOptions`` may be an empty document to indicate a `server default write concern `_. For example, in libmongoc: - - .. code:: c - - /* Create a default write concern, and set on a collection object. */ - mongoc_write_concern_t *wc = mongoc_write_concern_new (); - mongoc_collection_set_write_concern (collection, wc); - - If the driver has no way to explicitly set a default write concern on a database or collection, ignore the empty ``writeConcern`` document and continue with the test. -- The operations ``createIndex``, ``dropIndex`` are introduced. - - -Use as unit tests -================= - -Testing whether a URI is valid or not should simply be a matter of checking -whether URI parsing raises an error or exception. -Testing for emitted warnings may require more legwork (e.g. configuring a log -handler and watching for output). diff --git a/testdata/retryable-reads/README.rst b/testdata/retryable-reads/README.rst deleted file mode 100644 index 0c3a3fee1a..0000000000 --- a/testdata/retryable-reads/README.rst +++ /dev/null @@ -1,234 +0,0 @@ -===================== -Retryable Reads Tests -===================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the Retryable Reads spec. - -Prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Tests will require a MongoClient created with options defined in the tests. -Integration tests will require a running MongoDB cluster with server versions -4.0 or later. - -N.B. The spec specifies 3.6 as the minimum server version: however, -``failCommand`` is not supported on 3.6, so for now, testing requires MongoDB -4.0. 
Once `DRIVERS-560`_ is resolved, we will attempt to adapt its live failure -integration tests to test Retryable Reads on MongoDB 3.6. - -.. _DRIVERS-560: https://jira.mongodb.org/browse/DRIVERS-560 - -Server Fail Point -================= - -See: `Server Fail Point`_ in the Transactions spec test suite. - -.. _Server Fail Point: ../../transactions/tests#server-fail-point - -Disabling Fail Point after Test Execution ------------------------------------------ - -After each test that configures a fail point, drivers should disable the -``failCommand`` fail point to avoid spurious failures in -subsequent tests. The fail point may be disabled like so:: - - db.runCommand({ - configureFailPoint: "failCommand", - mode: "off" - }); - -Network Error Tests -=================== - -Network error tests are expressed in YAML and should be run against a standalone, -sharded cluster, or single-node replica set. - - -Test Format ------------ - -Each YAML file has the following keys: - -- ``runOn`` (optional): An array of server version and/or topology requirements - for which the tests can be run. If the test environment satisfies one or more - of these requirements, the tests may be executed; otherwise, this file should - be skipped. If this field is omitted, the tests can be assumed to have no - particular requirements and should be executed. Each element will have some or - all of the following fields: - - - ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the tests. If this field is omitted, it should - be assumed that there is no lower bound on the required server version. - - - ``maxServerVersion`` (optional): The maximum server version (inclusive) - against which the tests can be run successfully. If this field is omitted, - it should be assumed that there is no upper bound on the required server - version. - - - ``topology`` (optional): An array of server topologies against which the - tests can be run successfully. Valid topologies are "single", - "replicaset", "sharded", and "load-balanced". If this field is omitted, - the default is all topologies (i.e. ``["single", "replicaset", "sharded", - "load-balanced"]``). - - - ``serverless``: Optional string. Whether or not the test should be run on - serverless instances imitating sharded clusters. Valid values are "require", - "forbid", and "allow". If "require", the test MUST only be run on serverless - instances. If "forbid", the test MUST NOT be run on serverless instances. If - omitted or "allow", this option has no effect. - - The test runner MUST be informed whether or not serverless is being used in - order to determine if this requirement is met (e.g. through an environment - variable or configuration option). Since the serverless proxy imitates a - mongos, the runner is not capable of determining this by issuing a server - command such as ``buildInfo`` or ``hello``. - -- ``database_name`` and ``collection_name``: Optional. The database and - collection to use for testing. - -- ``bucket_name``: Optional. The GridFS bucket name to use for testing. - -- ``data``: The data that should exist in the collection(s) under test before - each test run. This will typically be an array of documents to be inserted - into the collection under test (i.e. ``collection_name``); however, this field - may also be an object mapping collection names to arrays of documents to be - inserted into the specified collection. - -- ``tests``: An array of tests that are to be run independently of each other.
- Each test will have some or all of the following fields: - - - ``description``: The name of the test. - - - ``clientOptions``: Optional, parameters to pass to MongoClient(). - - - ``useMultipleMongoses`` (optional): If ``true``, the MongoClient for this - test should be initialized with multiple mongos seed addresses. If ``false`` - or omitted, only a single mongos address should be specified. This field has - no effect for non-sharded topologies. - - - ``skipReason``: Optional, string describing why this test should be skipped. - - - ``failPoint``: Optional, a server fail point to enable, expressed as the - configureFailPoint command to run on the admin database. - - - ``operations``: An array of documents describing an operation to be - executed. Each document has the following fields: - - - ``name``: The name of the operation on ``object``. - - - ``object``: The name of the object to perform the operation on. Can be - "database", "collection", "client", or "gridfsbucket." - - - ``arguments``: Optional, the names and values of arguments. - - - ``result``: Optional. The return value from the operation, if any. This - field may be a scalar (e.g. in the case of a count), a single document, or - an array of documents in the case of a multi-document read. - - - ``error``: Optional. If ``true``, the test should expect an error or - exception. - - - ``expectations``: Optional list of command-started events. - -GridFS Tests ------------- - -GridFS tests are denoted by the presence of ``bucket_name`` in the YAML file. -The ``data`` field will also be an object, which maps collection names -(e.g. ``fs.files``) to an array of documents that should be inserted into -the specified collection. - -``fs.files`` and ``fs.chunks`` should be created in the database -specified by ``database_name``. This could be done via inserts or by -creating GridFSBuckets—using the GridFS ``bucketName`` (see -`GridFSBucket spec`_) specified by the ``bucket_name`` field in the YAML -file—and calling ``upload_from_stream_with_id`` with the appropriate -data. - -``Download`` tests should be tested against ``GridFS.download_to_stream``. -``DownloadByName`` tests should be tested against -``GridFS.download_to_stream_by_name``. - - -.. _GridFSBucket spec: https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst#configurable-gridfsbucket-class - - -Speeding Up Tests ------------------ - -Drivers can greatly reduce the execution time of tests by setting `heartbeatFrequencyMS`_ -and `minHeartbeatFrequencyMS`_ (internally) to a small value (e.g. 5ms), below what -is normally permitted in the SDAM spec. If a test specifies an explicit value for -heartbeatFrequencyMS (e.g. client or URI options), drivers MUST use that value. - -.. _minHeartbeatFrequencyMS: ../../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#minheartbeatfrequencyms -.. _heartbeatFrequencyMS: ../../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#heartbeatfrequencyms - -Optional Enumeration Commands -============================= - -A driver only needs to test the optional enumeration commands it has chosen to -implement (e.g. ``Database.listCollectionNames()``). - -PoolClearedError Retryability Test -================================== - -This test will be used to ensure drivers properly retry after encountering PoolClearedErrors. -This test MUST be implemented by any driver that implements the CMAP specification. - -1. Create a client with maxPoolSize=1 and retryReads=true.
If testing against a - sharded deployment, be sure to connect to only a single mongos. - -2. Enable the following failpoint:: - - { - configureFailPoint: "failCommand", - mode: { times: 1 }, - data: { - failCommands: ["find"], - errorCode: 91, - blockConnection: true, - blockTimeMS: 1000 - } - } - -3. Start two threads and attempt to perform a ``findOne`` simultaneously on both. - -4. Verify that both ``findOne`` attempts succeed. - -5. Via CMAP monitoring, assert that the first check out succeeds. - -6. Via CMAP monitoring, assert that a PoolClearedEvent is then emitted. - -7. Via CMAP monitoring, assert that the second check out then fails due to a - connection error. - -8. Via Command Monitoring, assert that exactly three ``find`` CommandStartedEvents - were observed in total. - -9. Disable the failpoint. - - -Changelog -========= - -:2019-03-19: Add top-level ``runOn`` field to denote server version and/or - topology requirements requirements for the test file. Removes the - ``minServerVersion`` and ``topology`` top-level fields, which are - now expressed within ``runOn`` elements. - - Add test-level ``useMultipleMongoses`` field. - -:2020-09-16: Suggest lowering heartbeatFrequencyMS in addition to minHeartbeatFrequencyMS. - -:2021-03-23: Add prose test for retrying PoolClearedErrors - -:2021-04-29: Add ``load-balanced`` to test topology requirements. diff --git a/testdata/retryable-reads/aggregate-merge.json b/testdata/retryable-reads/legacy/aggregate-merge.json similarity index 100% rename from testdata/retryable-reads/aggregate-merge.json rename to testdata/retryable-reads/legacy/aggregate-merge.json diff --git a/testdata/retryable-reads/aggregate-merge.yml b/testdata/retryable-reads/legacy/aggregate-merge.yml similarity index 100% rename from testdata/retryable-reads/aggregate-merge.yml rename to testdata/retryable-reads/legacy/aggregate-merge.yml diff --git a/testdata/retryable-reads/aggregate-serverErrors.json b/testdata/retryable-reads/legacy/aggregate-serverErrors.json similarity index 100% rename from testdata/retryable-reads/aggregate-serverErrors.json rename to testdata/retryable-reads/legacy/aggregate-serverErrors.json diff --git a/testdata/retryable-reads/aggregate-serverErrors.yml b/testdata/retryable-reads/legacy/aggregate-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/aggregate-serverErrors.yml rename to testdata/retryable-reads/legacy/aggregate-serverErrors.yml diff --git a/testdata/retryable-reads/aggregate.json b/testdata/retryable-reads/legacy/aggregate.json similarity index 100% rename from testdata/retryable-reads/aggregate.json rename to testdata/retryable-reads/legacy/aggregate.json diff --git a/testdata/retryable-reads/aggregate.yml b/testdata/retryable-reads/legacy/aggregate.yml similarity index 100% rename from testdata/retryable-reads/aggregate.yml rename to testdata/retryable-reads/legacy/aggregate.yml diff --git a/testdata/retryable-reads/changeStreams-client.watch-serverErrors.json b/testdata/retryable-reads/legacy/changeStreams-client.watch-serverErrors.json similarity index 100% rename from testdata/retryable-reads/changeStreams-client.watch-serverErrors.json rename to testdata/retryable-reads/legacy/changeStreams-client.watch-serverErrors.json diff --git a/testdata/retryable-reads/changeStreams-client.watch-serverErrors.yml b/testdata/retryable-reads/legacy/changeStreams-client.watch-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/changeStreams-client.watch-serverErrors.yml rename to 
testdata/retryable-reads/legacy/changeStreams-client.watch-serverErrors.yml diff --git a/testdata/retryable-reads/changeStreams-client.watch.json b/testdata/retryable-reads/legacy/changeStreams-client.watch.json similarity index 100% rename from testdata/retryable-reads/changeStreams-client.watch.json rename to testdata/retryable-reads/legacy/changeStreams-client.watch.json diff --git a/testdata/retryable-reads/changeStreams-client.watch.yml b/testdata/retryable-reads/legacy/changeStreams-client.watch.yml similarity index 100% rename from testdata/retryable-reads/changeStreams-client.watch.yml rename to testdata/retryable-reads/legacy/changeStreams-client.watch.yml diff --git a/testdata/retryable-reads/changeStreams-db.coll.watch-serverErrors.json b/testdata/retryable-reads/legacy/changeStreams-db.coll.watch-serverErrors.json similarity index 100% rename from testdata/retryable-reads/changeStreams-db.coll.watch-serverErrors.json rename to testdata/retryable-reads/legacy/changeStreams-db.coll.watch-serverErrors.json diff --git a/testdata/retryable-reads/changeStreams-db.coll.watch-serverErrors.yml b/testdata/retryable-reads/legacy/changeStreams-db.coll.watch-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/changeStreams-db.coll.watch-serverErrors.yml rename to testdata/retryable-reads/legacy/changeStreams-db.coll.watch-serverErrors.yml diff --git a/testdata/retryable-reads/changeStreams-db.coll.watch.json b/testdata/retryable-reads/legacy/changeStreams-db.coll.watch.json similarity index 100% rename from testdata/retryable-reads/changeStreams-db.coll.watch.json rename to testdata/retryable-reads/legacy/changeStreams-db.coll.watch.json diff --git a/testdata/retryable-reads/changeStreams-db.coll.watch.yml b/testdata/retryable-reads/legacy/changeStreams-db.coll.watch.yml similarity index 100% rename from testdata/retryable-reads/changeStreams-db.coll.watch.yml rename to testdata/retryable-reads/legacy/changeStreams-db.coll.watch.yml diff --git a/testdata/retryable-reads/changeStreams-db.watch-serverErrors.json b/testdata/retryable-reads/legacy/changeStreams-db.watch-serverErrors.json similarity index 100% rename from testdata/retryable-reads/changeStreams-db.watch-serverErrors.json rename to testdata/retryable-reads/legacy/changeStreams-db.watch-serverErrors.json diff --git a/testdata/retryable-reads/changeStreams-db.watch-serverErrors.yml b/testdata/retryable-reads/legacy/changeStreams-db.watch-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/changeStreams-db.watch-serverErrors.yml rename to testdata/retryable-reads/legacy/changeStreams-db.watch-serverErrors.yml diff --git a/testdata/retryable-reads/changeStreams-db.watch.json b/testdata/retryable-reads/legacy/changeStreams-db.watch.json similarity index 100% rename from testdata/retryable-reads/changeStreams-db.watch.json rename to testdata/retryable-reads/legacy/changeStreams-db.watch.json diff --git a/testdata/retryable-reads/changeStreams-db.watch.yml b/testdata/retryable-reads/legacy/changeStreams-db.watch.yml similarity index 100% rename from testdata/retryable-reads/changeStreams-db.watch.yml rename to testdata/retryable-reads/legacy/changeStreams-db.watch.yml diff --git a/testdata/retryable-reads/count-serverErrors.json b/testdata/retryable-reads/legacy/count-serverErrors.json similarity index 100% rename from testdata/retryable-reads/count-serverErrors.json rename to testdata/retryable-reads/legacy/count-serverErrors.json diff --git 
a/testdata/retryable-reads/count-serverErrors.yml b/testdata/retryable-reads/legacy/count-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/count-serverErrors.yml rename to testdata/retryable-reads/legacy/count-serverErrors.yml diff --git a/testdata/retryable-reads/count.json b/testdata/retryable-reads/legacy/count.json similarity index 100% rename from testdata/retryable-reads/count.json rename to testdata/retryable-reads/legacy/count.json diff --git a/testdata/retryable-reads/count.yml b/testdata/retryable-reads/legacy/count.yml similarity index 100% rename from testdata/retryable-reads/count.yml rename to testdata/retryable-reads/legacy/count.yml diff --git a/testdata/retryable-reads/countDocuments-serverErrors.json b/testdata/retryable-reads/legacy/countDocuments-serverErrors.json similarity index 100% rename from testdata/retryable-reads/countDocuments-serverErrors.json rename to testdata/retryable-reads/legacy/countDocuments-serverErrors.json diff --git a/testdata/retryable-reads/countDocuments-serverErrors.yml b/testdata/retryable-reads/legacy/countDocuments-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/countDocuments-serverErrors.yml rename to testdata/retryable-reads/legacy/countDocuments-serverErrors.yml diff --git a/testdata/retryable-reads/countDocuments.json b/testdata/retryable-reads/legacy/countDocuments.json similarity index 100% rename from testdata/retryable-reads/countDocuments.json rename to testdata/retryable-reads/legacy/countDocuments.json diff --git a/testdata/retryable-reads/countDocuments.yml b/testdata/retryable-reads/legacy/countDocuments.yml similarity index 100% rename from testdata/retryable-reads/countDocuments.yml rename to testdata/retryable-reads/legacy/countDocuments.yml diff --git a/testdata/retryable-reads/distinct-serverErrors.json b/testdata/retryable-reads/legacy/distinct-serverErrors.json similarity index 100% rename from testdata/retryable-reads/distinct-serverErrors.json rename to testdata/retryable-reads/legacy/distinct-serverErrors.json diff --git a/testdata/retryable-reads/distinct-serverErrors.yml b/testdata/retryable-reads/legacy/distinct-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/distinct-serverErrors.yml rename to testdata/retryable-reads/legacy/distinct-serverErrors.yml diff --git a/testdata/retryable-reads/distinct.json b/testdata/retryable-reads/legacy/distinct.json similarity index 100% rename from testdata/retryable-reads/distinct.json rename to testdata/retryable-reads/legacy/distinct.json diff --git a/testdata/retryable-reads/distinct.yml b/testdata/retryable-reads/legacy/distinct.yml similarity index 100% rename from testdata/retryable-reads/distinct.yml rename to testdata/retryable-reads/legacy/distinct.yml diff --git a/testdata/retryable-reads/estimatedDocumentCount-serverErrors.json b/testdata/retryable-reads/legacy/estimatedDocumentCount-serverErrors.json similarity index 100% rename from testdata/retryable-reads/estimatedDocumentCount-serverErrors.json rename to testdata/retryable-reads/legacy/estimatedDocumentCount-serverErrors.json diff --git a/testdata/retryable-reads/estimatedDocumentCount-serverErrors.yml b/testdata/retryable-reads/legacy/estimatedDocumentCount-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/estimatedDocumentCount-serverErrors.yml rename to testdata/retryable-reads/legacy/estimatedDocumentCount-serverErrors.yml diff --git a/testdata/retryable-reads/estimatedDocumentCount.json 
b/testdata/retryable-reads/legacy/estimatedDocumentCount.json similarity index 100% rename from testdata/retryable-reads/estimatedDocumentCount.json rename to testdata/retryable-reads/legacy/estimatedDocumentCount.json diff --git a/testdata/retryable-reads/estimatedDocumentCount.yml b/testdata/retryable-reads/legacy/estimatedDocumentCount.yml similarity index 100% rename from testdata/retryable-reads/estimatedDocumentCount.yml rename to testdata/retryable-reads/legacy/estimatedDocumentCount.yml diff --git a/testdata/retryable-reads/find-serverErrors.json b/testdata/retryable-reads/legacy/find-serverErrors.json similarity index 100% rename from testdata/retryable-reads/find-serverErrors.json rename to testdata/retryable-reads/legacy/find-serverErrors.json diff --git a/testdata/retryable-reads/find-serverErrors.yml b/testdata/retryable-reads/legacy/find-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/find-serverErrors.yml rename to testdata/retryable-reads/legacy/find-serverErrors.yml diff --git a/testdata/retryable-reads/find.json b/testdata/retryable-reads/legacy/find.json similarity index 100% rename from testdata/retryable-reads/find.json rename to testdata/retryable-reads/legacy/find.json diff --git a/testdata/retryable-reads/find.yml b/testdata/retryable-reads/legacy/find.yml similarity index 100% rename from testdata/retryable-reads/find.yml rename to testdata/retryable-reads/legacy/find.yml diff --git a/testdata/retryable-reads/findOne-serverErrors.json b/testdata/retryable-reads/legacy/findOne-serverErrors.json similarity index 100% rename from testdata/retryable-reads/findOne-serverErrors.json rename to testdata/retryable-reads/legacy/findOne-serverErrors.json diff --git a/testdata/retryable-reads/findOne-serverErrors.yml b/testdata/retryable-reads/legacy/findOne-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/findOne-serverErrors.yml rename to testdata/retryable-reads/legacy/findOne-serverErrors.yml diff --git a/testdata/retryable-reads/findOne.json b/testdata/retryable-reads/legacy/findOne.json similarity index 100% rename from testdata/retryable-reads/findOne.json rename to testdata/retryable-reads/legacy/findOne.json diff --git a/testdata/retryable-reads/findOne.yml b/testdata/retryable-reads/legacy/findOne.yml similarity index 100% rename from testdata/retryable-reads/findOne.yml rename to testdata/retryable-reads/legacy/findOne.yml diff --git a/testdata/retryable-reads/gridfs-download-serverErrors.json b/testdata/retryable-reads/legacy/gridfs-download-serverErrors.json similarity index 100% rename from testdata/retryable-reads/gridfs-download-serverErrors.json rename to testdata/retryable-reads/legacy/gridfs-download-serverErrors.json diff --git a/testdata/retryable-reads/gridfs-download-serverErrors.yml b/testdata/retryable-reads/legacy/gridfs-download-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/gridfs-download-serverErrors.yml rename to testdata/retryable-reads/legacy/gridfs-download-serverErrors.yml diff --git a/testdata/retryable-reads/gridfs-download.json b/testdata/retryable-reads/legacy/gridfs-download.json similarity index 100% rename from testdata/retryable-reads/gridfs-download.json rename to testdata/retryable-reads/legacy/gridfs-download.json diff --git a/testdata/retryable-reads/gridfs-download.yml b/testdata/retryable-reads/legacy/gridfs-download.yml similarity index 100% rename from testdata/retryable-reads/gridfs-download.yml rename to 
testdata/retryable-reads/legacy/gridfs-download.yml diff --git a/testdata/retryable-reads/gridfs-downloadByName-serverErrors.json b/testdata/retryable-reads/legacy/gridfs-downloadByName-serverErrors.json similarity index 100% rename from testdata/retryable-reads/gridfs-downloadByName-serverErrors.json rename to testdata/retryable-reads/legacy/gridfs-downloadByName-serverErrors.json diff --git a/testdata/retryable-reads/gridfs-downloadByName-serverErrors.yml b/testdata/retryable-reads/legacy/gridfs-downloadByName-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/gridfs-downloadByName-serverErrors.yml rename to testdata/retryable-reads/legacy/gridfs-downloadByName-serverErrors.yml diff --git a/testdata/retryable-reads/gridfs-downloadByName.json b/testdata/retryable-reads/legacy/gridfs-downloadByName.json similarity index 100% rename from testdata/retryable-reads/gridfs-downloadByName.json rename to testdata/retryable-reads/legacy/gridfs-downloadByName.json diff --git a/testdata/retryable-reads/gridfs-downloadByName.yml b/testdata/retryable-reads/legacy/gridfs-downloadByName.yml similarity index 100% rename from testdata/retryable-reads/gridfs-downloadByName.yml rename to testdata/retryable-reads/legacy/gridfs-downloadByName.yml diff --git a/testdata/retryable-reads/listCollectionNames-serverErrors.json b/testdata/retryable-reads/legacy/listCollectionNames-serverErrors.json similarity index 100% rename from testdata/retryable-reads/listCollectionNames-serverErrors.json rename to testdata/retryable-reads/legacy/listCollectionNames-serverErrors.json diff --git a/testdata/retryable-reads/listCollectionNames-serverErrors.yml b/testdata/retryable-reads/legacy/listCollectionNames-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/listCollectionNames-serverErrors.yml rename to testdata/retryable-reads/legacy/listCollectionNames-serverErrors.yml diff --git a/testdata/retryable-reads/listCollectionNames.json b/testdata/retryable-reads/legacy/listCollectionNames.json similarity index 100% rename from testdata/retryable-reads/listCollectionNames.json rename to testdata/retryable-reads/legacy/listCollectionNames.json diff --git a/testdata/retryable-reads/listCollectionNames.yml b/testdata/retryable-reads/legacy/listCollectionNames.yml similarity index 100% rename from testdata/retryable-reads/listCollectionNames.yml rename to testdata/retryable-reads/legacy/listCollectionNames.yml diff --git a/testdata/retryable-reads/listCollectionObjects-serverErrors.json b/testdata/retryable-reads/legacy/listCollectionObjects-serverErrors.json similarity index 100% rename from testdata/retryable-reads/listCollectionObjects-serverErrors.json rename to testdata/retryable-reads/legacy/listCollectionObjects-serverErrors.json diff --git a/testdata/retryable-reads/listCollectionObjects-serverErrors.yml b/testdata/retryable-reads/legacy/listCollectionObjects-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/listCollectionObjects-serverErrors.yml rename to testdata/retryable-reads/legacy/listCollectionObjects-serverErrors.yml diff --git a/testdata/retryable-reads/listCollectionObjects.json b/testdata/retryable-reads/legacy/listCollectionObjects.json similarity index 100% rename from testdata/retryable-reads/listCollectionObjects.json rename to testdata/retryable-reads/legacy/listCollectionObjects.json diff --git a/testdata/retryable-reads/listCollectionObjects.yml b/testdata/retryable-reads/legacy/listCollectionObjects.yml similarity index 100% 
rename from testdata/retryable-reads/listCollectionObjects.yml rename to testdata/retryable-reads/legacy/listCollectionObjects.yml diff --git a/testdata/retryable-reads/listCollections-serverErrors.json b/testdata/retryable-reads/legacy/listCollections-serverErrors.json similarity index 100% rename from testdata/retryable-reads/listCollections-serverErrors.json rename to testdata/retryable-reads/legacy/listCollections-serverErrors.json diff --git a/testdata/retryable-reads/listCollections-serverErrors.yml b/testdata/retryable-reads/legacy/listCollections-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/listCollections-serverErrors.yml rename to testdata/retryable-reads/legacy/listCollections-serverErrors.yml diff --git a/testdata/retryable-reads/listCollections.json b/testdata/retryable-reads/legacy/listCollections.json similarity index 100% rename from testdata/retryable-reads/listCollections.json rename to testdata/retryable-reads/legacy/listCollections.json diff --git a/testdata/retryable-reads/listCollections.yml b/testdata/retryable-reads/legacy/listCollections.yml similarity index 100% rename from testdata/retryable-reads/listCollections.yml rename to testdata/retryable-reads/legacy/listCollections.yml diff --git a/testdata/retryable-reads/listDatabaseNames-serverErrors.json b/testdata/retryable-reads/legacy/listDatabaseNames-serverErrors.json similarity index 100% rename from testdata/retryable-reads/listDatabaseNames-serverErrors.json rename to testdata/retryable-reads/legacy/listDatabaseNames-serverErrors.json diff --git a/testdata/retryable-reads/listDatabaseNames-serverErrors.yml b/testdata/retryable-reads/legacy/listDatabaseNames-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/listDatabaseNames-serverErrors.yml rename to testdata/retryable-reads/legacy/listDatabaseNames-serverErrors.yml diff --git a/testdata/retryable-reads/listDatabaseNames.json b/testdata/retryable-reads/legacy/listDatabaseNames.json similarity index 100% rename from testdata/retryable-reads/listDatabaseNames.json rename to testdata/retryable-reads/legacy/listDatabaseNames.json diff --git a/testdata/retryable-reads/listDatabaseNames.yml b/testdata/retryable-reads/legacy/listDatabaseNames.yml similarity index 100% rename from testdata/retryable-reads/listDatabaseNames.yml rename to testdata/retryable-reads/legacy/listDatabaseNames.yml diff --git a/testdata/retryable-reads/listDatabaseObjects-serverErrors.json b/testdata/retryable-reads/legacy/listDatabaseObjects-serverErrors.json similarity index 100% rename from testdata/retryable-reads/listDatabaseObjects-serverErrors.json rename to testdata/retryable-reads/legacy/listDatabaseObjects-serverErrors.json diff --git a/testdata/retryable-reads/listDatabaseObjects-serverErrors.yml b/testdata/retryable-reads/legacy/listDatabaseObjects-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/listDatabaseObjects-serverErrors.yml rename to testdata/retryable-reads/legacy/listDatabaseObjects-serverErrors.yml diff --git a/testdata/retryable-reads/listDatabaseObjects.json b/testdata/retryable-reads/legacy/listDatabaseObjects.json similarity index 100% rename from testdata/retryable-reads/listDatabaseObjects.json rename to testdata/retryable-reads/legacy/listDatabaseObjects.json diff --git a/testdata/retryable-reads/listDatabaseObjects.yml b/testdata/retryable-reads/legacy/listDatabaseObjects.yml similarity index 100% rename from testdata/retryable-reads/listDatabaseObjects.yml rename to 
testdata/retryable-reads/legacy/listDatabaseObjects.yml diff --git a/testdata/retryable-reads/listDatabases-serverErrors.json b/testdata/retryable-reads/legacy/listDatabases-serverErrors.json similarity index 100% rename from testdata/retryable-reads/listDatabases-serverErrors.json rename to testdata/retryable-reads/legacy/listDatabases-serverErrors.json diff --git a/testdata/retryable-reads/listDatabases-serverErrors.yml b/testdata/retryable-reads/legacy/listDatabases-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/listDatabases-serverErrors.yml rename to testdata/retryable-reads/legacy/listDatabases-serverErrors.yml diff --git a/testdata/retryable-reads/listDatabases.json b/testdata/retryable-reads/legacy/listDatabases.json similarity index 100% rename from testdata/retryable-reads/listDatabases.json rename to testdata/retryable-reads/legacy/listDatabases.json diff --git a/testdata/retryable-reads/listDatabases.yml b/testdata/retryable-reads/legacy/listDatabases.yml similarity index 100% rename from testdata/retryable-reads/listDatabases.yml rename to testdata/retryable-reads/legacy/listDatabases.yml diff --git a/testdata/retryable-reads/listIndexNames-serverErrors.json b/testdata/retryable-reads/legacy/listIndexNames-serverErrors.json similarity index 100% rename from testdata/retryable-reads/listIndexNames-serverErrors.json rename to testdata/retryable-reads/legacy/listIndexNames-serverErrors.json diff --git a/testdata/retryable-reads/listIndexNames-serverErrors.yml b/testdata/retryable-reads/legacy/listIndexNames-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/listIndexNames-serverErrors.yml rename to testdata/retryable-reads/legacy/listIndexNames-serverErrors.yml diff --git a/testdata/retryable-reads/listIndexNames.json b/testdata/retryable-reads/legacy/listIndexNames.json similarity index 100% rename from testdata/retryable-reads/listIndexNames.json rename to testdata/retryable-reads/legacy/listIndexNames.json diff --git a/testdata/retryable-reads/listIndexNames.yml b/testdata/retryable-reads/legacy/listIndexNames.yml similarity index 100% rename from testdata/retryable-reads/listIndexNames.yml rename to testdata/retryable-reads/legacy/listIndexNames.yml diff --git a/testdata/retryable-reads/listIndexes-serverErrors.json b/testdata/retryable-reads/legacy/listIndexes-serverErrors.json similarity index 100% rename from testdata/retryable-reads/listIndexes-serverErrors.json rename to testdata/retryable-reads/legacy/listIndexes-serverErrors.json diff --git a/testdata/retryable-reads/listIndexes-serverErrors.yml b/testdata/retryable-reads/legacy/listIndexes-serverErrors.yml similarity index 100% rename from testdata/retryable-reads/listIndexes-serverErrors.yml rename to testdata/retryable-reads/legacy/listIndexes-serverErrors.yml diff --git a/testdata/retryable-reads/listIndexes.json b/testdata/retryable-reads/legacy/listIndexes.json similarity index 100% rename from testdata/retryable-reads/listIndexes.json rename to testdata/retryable-reads/legacy/listIndexes.json diff --git a/testdata/retryable-reads/listIndexes.yml b/testdata/retryable-reads/legacy/listIndexes.yml similarity index 100% rename from testdata/retryable-reads/listIndexes.yml rename to testdata/retryable-reads/legacy/listIndexes.yml diff --git a/testdata/retryable-reads/mapReduce.json b/testdata/retryable-reads/legacy/mapReduce.json similarity index 100% rename from testdata/retryable-reads/mapReduce.json rename to testdata/retryable-reads/legacy/mapReduce.json diff 
--git a/testdata/retryable-reads/mapReduce.yml b/testdata/retryable-reads/legacy/mapReduce.yml similarity index 100% rename from testdata/retryable-reads/mapReduce.yml rename to testdata/retryable-reads/legacy/mapReduce.yml diff --git a/testdata/retryable-reads/unified/readConcernMajorityNotAvailableYet.json b/testdata/retryable-reads/unified/readConcernMajorityNotAvailableYet.json new file mode 100644 index 0000000000..8aa6a6b5e5 --- /dev/null +++ b/testdata/retryable-reads/unified/readConcernMajorityNotAvailableYet.json @@ -0,0 +1,147 @@ +{ + "description": "ReadConcernMajorityNotAvailableYet is a retryable read", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "readconcernmajoritynotavailableyet_test" + } + } + ], + "initialData": [ + { + "collectionName": "readconcernmajoritynotavailableyet_test", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on second attempt after ReadConcernMajorityNotAvailableYet", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 134 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "readconcernmajoritynotavailableyet_test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "readconcernmajoritynotavailableyet_test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/testdata/retryable-reads/unified/readConcernMajorityNotAvailableYet.yml b/testdata/retryable-reads/unified/readConcernMajorityNotAvailableYet.yml new file mode 100644 index 0000000000..707a62acd7 --- /dev/null +++ b/testdata/retryable-reads/unified/readConcernMajorityNotAvailableYet.yml @@ -0,0 +1,68 @@ +description: "ReadConcernMajorityNotAvailableYet is a retryable read" + +schemaVersion: "1.3" + +runOnRequirements: + - minServerVersion: "4.0" + topologies: [single, replicaset] + - minServerVersion: "4.1.7" + topologies: [sharded, load-balanced] + +createEntities: + - client: + id: &client0 client0 + # Ensure the `configureFailpoint` and `find` commands are run on the same mongos + useMultipleMongoses: false + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name "retryable-reads-tests" + - collection: 
+ id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name "readconcernmajoritynotavailableyet_test" + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +tests: + - description: "Find succeeds on second attempt after ReadConcernMajorityNotAvailableYet" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ "find" ] + errorCode: 134 # ReadConcernMajorityNotAvailableYet + - name: find + arguments: + filter: { _id: { $gt: 1 } } + object: *collection0 + expectResult: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: { _id: { $gt: 1 } } + commandName: find + databaseName: *database0Name + - commandStartedEvent: + command: + find: *collection0Name + filter: { _id: { $gt: 1 } } + commandName: find + databaseName: *database0Name diff --git a/testdata/retryable-writes/README.rst b/testdata/retryable-writes/README.rst deleted file mode 100644 index d681fff285..0000000000 --- a/testdata/retryable-writes/README.rst +++ /dev/null @@ -1,339 +0,0 @@ -===================== -Retryable Write Tests -===================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the Retryable Writes spec. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Tests will require a MongoClient created with options defined in the tests. -Integration tests will require a running MongoDB cluster with server versions -3.6.0 or later. The ``{setFeatureCompatibilityVersion: 3.6}`` admin command -will also need to have been executed to enable support for retryable writes on -the cluster. Some tests may have more stringent version requirements depending -on the fail points used. - -Server Fail Point -================= - -onPrimaryTransactionalWrite ---------------------------- - -Some tests depend on a server fail point, ``onPrimaryTransactionalWrite``, which -allows us to force a network error before the server would return a write result -to the client. The fail point also allows control whether the server will -successfully commit the write via its ``failBeforeCommitExceptionCode`` option. -Keep in mind that the fail point only triggers for transaction writes (i.e. write -commands including ``txnNumber`` and ``lsid`` fields). See `SERVER-29606`_ for -more information. - -.. _SERVER-29606: https://jira.mongodb.org/browse/SERVER-29606 - -The fail point may be configured like so:: - - db.runCommand({ - configureFailPoint: "onPrimaryTransactionalWrite", - mode: , - data: - }); - -``mode`` is a generic fail point option and may be assigned a string or document -value. The string values ``"alwaysOn"`` and ``"off"`` may be used to enable or -disable the fail point, respectively. A document may be used to specify either -``times`` or ``skip``, which are mutually exclusive: - -- ``{ times: }`` may be used to limit the number of times the fail - point may trigger before transitioning to ``"off"``. 
-- ``{ skip: }`` may be used to defer the first trigger of a fail - point, after which it will transition to ``"alwaysOn"``. - -The ``data`` option is a document that may be used to specify options that -control the fail point's behavior. As noted in `SERVER-29606`_, -``onPrimaryTransactionalWrite`` supports the following ``data`` options, which -may be combined if desired: - -- ``closeConnection``: Boolean option, which defaults to ``true``. If ``true``, - the connection on which the write is executed will be closed before a result - can be returned. -- ``failBeforeCommitExceptionCode``: Integer option, which is unset by default. - If set, the specified exception code will be thrown and the write will not be - committed. If unset, the write will be allowed to commit. - -failCommand ------------ - -Some tests depend on a server fail point, ``failCommand``, which allows the -client to force the server to return an error. Unlike -``onPrimaryTransactionalWrite``, ``failCommand`` does not allow the client to -directly control whether the server will commit the operation (execution of the -write depends on whether the ``closeConnection`` and/or ``errorCode`` options -are specified). See: `failCommand <../../transactions/tests#failcommand>`_ in -the Transactions spec test suite for more information. - -Disabling Fail Points after Test Execution ------------------------------------------- - -After each test that configures a fail point, drivers should disable the fail -point to avoid spurious failures in subsequent tests. The fail point may be -disabled like so:: - - db.runCommand({ - configureFailPoint: , - mode: "off" - }); - -Use as Integration Tests -======================== - -Integration tests are expressed in YAML and can be run against a replica set or -sharded cluster as denoted by the top-level ``runOn`` field. Tests that rely on -the ``onPrimaryTransactionalWrite`` fail point cannot be run against a sharded -cluster because the fail point is not supported by mongos. - -The tests exercise the following scenarios: - -- Single-statement write operations - - - Each test expecting a write result will encounter at-most one network error - for the write command. Retry attempts should return without error and allow - operation to succeed. Observation of the collection state will assert that - the write occurred at-most once. - - - Each test expecting an error will encounter successive network errors for - the write command. Observation of the collection state will assert that the - write was never committed on the server. - -- Multi-statement write operations - - - Each test expecting a write result will encounter at-most one network error - for some write command(s) in the batch. Retry attempts should return without - error and allow the batch to ultimately succeed. Observation of the - collection state will assert that each write occurred at-most once. - - - Each test expecting an error will encounter successive network errors for - some write command in the batch. The batch will ultimately fail with an - error, but observation of the collection state will assert that the failing - write was never committed on the server. We may observe that earlier writes - in the batch occurred at-most once. - -We cannot test a scenario where the first and second attempts both encounter -network errors but the write does actually commit during one of those attempts. -This is because (1) the fail point only triggers when a write would be committed -and (2) the skip and times options are mutually exclusive. 
That said, such a -test would mainly assert the server's correctness for at-most once semantics and -is not essential to assert driver correctness. - -Test Format ------------ - -Each YAML file has the following keys: - -- ``runOn`` (optional): An array of server version and/or topology requirements - for which the tests can be run. If the test environment satisfies one or more - of these requirements, the tests may be executed; otherwise, this file should - be skipped. If this field is omitted, the tests can be assumed to have no - particular requirements and should be executed. Each element will have some or - all of the following fields: - - - ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the tests. If this field is omitted, it should - be assumed that there is no lower bound on the required server version. - - - ``maxServerVersion`` (optional): The maximum server version (inclusive) - against which the tests can be run successfully. If this field is omitted, - it should be assumed that there is no upper bound on the required server - version. - - - ``topology`` (optional): An array of server topologies against which the - tests can be run successfully. Valid topologies are "single", "replicaset", - and "sharded". If this field is omitted, the default is all topologies (i.e. - ``["single", "replicaset", "sharded"]``). - -- ``data``: The data that should exist in the collection under test before each - test run. - -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some or all of the following fields: - - - ``description``: The name of the test. - - - ``clientOptions``: Parameters to pass to MongoClient(). - - - ``useMultipleMongoses`` (optional): If ``true``, the MongoClient for this - test should be initialized with multiple mongos seed addresses. If ``false`` - or omitted, only a single mongos address should be specified. This field has - no effect for non-sharded topologies. - - - ``failPoint`` (optional): The ``configureFailPoint`` command document to run - to configure a fail point on the primary server. Drivers must ensure that - ``configureFailPoint`` is the first field in the command. This option and - ``useMultipleMongoses: true`` are mutually exclusive. - - - ``operation``: Document describing the operation to be executed. The - operation should be executed through a collection object derived from a - client that has been created with ``clientOptions``. The operation will have - some or all of the following fields: - - - ``name``: The name of the operation as defined in the CRUD specification. - - - ``arguments``: The names and values of arguments from the CRUD - specification. - - - ``outcome``: Document describing the return value and/or expected state of - the collection after the operation is executed. This will have some or all - of the following fields: - - - ``error``: If ``true``, the test should expect an error or exception. Note - that some drivers may report server-side errors as a write error within a - write result object. - - - ``result``: The return value from the operation. This will correspond to - an operation's result object as defined in the CRUD specification. This - field may be omitted if ``error`` is ``true``. If this field is present - and ``error`` is ``true`` (generally for multi-statement tests), the - result reports information about operations that succeeded before an - unrecoverable failure. 
In that case, drivers may choose to check the - result object if their BulkWriteException (or equivalent) provides access - to a write result object. - - - ``errorLabelsContain``: A list of error label strings that the - error is expected to have. - - - ``errorLabelsOmit``: A list of error label strings that the - error is expected not to have. - - - ``collection``: - - - ``name`` (optional): The name of the collection to verify. If this isn't - present then use the collection under test. - - - ``data``: The data that should exist in the collection after the - operation has been run. - -Split Batch Tests -================= - -The YAML tests specify bulk write operations that are split by command type -(e.g. sequence of insert, update, and delete commands). Multi-statement write -operations may also be split due to ``maxWriteBatchSize``, -``maxBsonObjectSize``, or ``maxMessageSizeBytes``. - -For instance, an insertMany operation with five 10 MiB documents executed using -OP_MSG payload type 0 (i.e. entire command in one document) would be split into -five insert commands in order to respect the 16 MiB ``maxBsonObjectSize`` limit. -The same insertMany operation executed using OP_MSG payload type 1 (i.e. command -arguments pulled out into a separate payload vector) would be split into two -insert commands in order to respect the 48 MB ``maxMessageSizeBytes`` limit. - -Noting when a driver might split operations, the ``onPrimaryTransactionalWrite`` -fail point's ``skip`` option may be used to control when the fail point first -triggers. Once triggered, the fail point will transition to the ``alwaysOn`` -state until disabled. Driver authors should also note that the server attempts -to process all documents in a single insert command within a single commit (i.e. -one insert command with five documents may only trigger the fail point once). -This behavior is unique to insert commands (each statement in an update and -delete command is processed independently). - -If testing an insert that is split into two commands, a ``skip`` of one will -allow the fail point to trigger on the second insert command (because all -documents in the first command will be processed in the same commit). When -testing an update or delete that is split into two commands, the ``skip`` should -be set to the number of statements in the first command to allow the fail point -to trigger on the second command. - -Command Construction Tests -========================== - -Drivers should also assert that command documents are properly constructed with -or without a transaction ID, depending on whether the write operation is -supported. `Command Monitoring`_ may be used to check for the presence of a -``txnNumber`` field in the command document. Note that command documents may -always include an ``lsid`` field per the `Driver Session`_ specification. - -.. _Command Monitoring: ../../command-monitoring/command-monitoring.rst -.. _Driver Session: ../../sessions/driver-sessions.rst - -These tests may be run against both a replica set and shard cluster. - -Drivers should test that transaction IDs are never included in commands for -unsupported write operations: - -* Write commands with unacknowledged write concerns (e.g. ``{w: 0}``) - -* Unsupported single-statement write operations - - - ``updateMany()`` - - ``deleteMany()`` - -* Unsupported multi-statement write operations - - - ``bulkWrite()`` that includes ``UpdateMany`` or ``DeleteMany`` - -* Unsupported write commands - - - ``aggregate`` with write stage (e.g. 
``$out``, ``$merge``) - -Drivers should test that transactions IDs are always included in commands for -supported write operations: - -* Supported single-statement write operations - - - ``insertOne()`` - - ``updateOne()`` - - ``replaceOne()`` - - ``deleteOne()`` - - ``findOneAndDelete()`` - - ``findOneAndReplace()`` - - ``findOneAndUpdate()`` - -* Supported multi-statement write operations - - - ``insertMany()`` with ``ordered=true`` - - ``insertMany()`` with ``ordered=false`` - - ``bulkWrite()`` with ``ordered=true`` (no ``UpdateMany`` or ``DeleteMany``) - - ``bulkWrite()`` with ``ordered=false`` (no ``UpdateMany`` or ``DeleteMany``) - -Prose Tests -=========== - -The following tests ensure that retryable writes work properly with replica sets -and sharded clusters. - -#. Test that retryable writes raise an exception when using the MMAPv1 storage - engine. For this test, execute a write operation, such as ``insertOne``, - which should generate an exception. Assert that the error message is the - replacement error message:: - - This MongoDB deployment does not support retryable writes. Please add - retryWrites=false to your connection string. - - and the error code is 20. - - **Note**: Drivers that rely on ``serverStatus`` to determine the storage engine - in use MAY skip this test for sharded clusters, since ``mongos`` does not report - this information in its ``serverStatus`` response. - -Changelog -========= - -:2019-10-21: Add ``errorLabelsContain`` and ``errorLabelsContain`` fields to ``result`` - -:2019-08-07: Add Prose Tests section - -:2019-06-07: Mention $merge stage for aggregate alongside $out - -:2019-03-01: Add top-level ``runOn`` field to denote server version and/or - topology requirements requirements for the test file. Removes the - ``minServerVersion`` and ``maxServerVersion`` top-level fields, - which are now expressed within ``runOn`` elements. - - Add test-level ``useMultipleMongoses`` field. diff --git a/testdata/server-discovery-and-monitoring/README.rst b/testdata/server-discovery-and-monitoring/README.rst deleted file mode 100644 index 61c3572387..0000000000 --- a/testdata/server-discovery-and-monitoring/README.rst +++ /dev/null @@ -1,452 +0,0 @@ -===================================== -Server Discovery And Monitoring Tests -===================================== - -.. contents:: - ----- - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the -Server Discovery And Monitoring Spec. - -Additional prose tests, that cannot be represented as spec tests, are -described and MUST be implemented. - -Version -------- - -Files in the "specifications" repository have no version scheme. They are not -tied to a MongoDB server version. - -Format ------- - -Each YAML file has the following keys: - -- description: A textual description of the test. -- uri: A connection string. -- phases: An array of "phase" objects. - A phase of the test optionally sends inputs to the client, - then tests the client's resulting TopologyDescription. - -Each phase object has the following keys: - -- description: (optional) A textual description of this phase. -- responses: (optional) An array of "response" objects. If not provided, - the test runner should construct the client and perform assertions specified - in the outcome object without processing any responses. -- applicationErrors: (optional) An array of "applicationError" objects. -- outcome: An "outcome" object representing the TopologyDescription. 
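For orientation, the top-level and phase keys above could be modeled in Go roughly as follows when loading the JSON variants of these files. This is a minimal sketch; the package, type, and field names are illustrative assumptions, not the driver's actual test-runner types::

    package sdamtest

    import "encoding/json"

    // testFile mirrors the top-level keys: description, uri, and phases.
    type testFile struct {
        Description string  `json:"description"`
        URI         string  `json:"uri"`
        Phases      []phase `json:"phases"`
    }

    // phase mirrors a single phase: optional responses and applicationErrors,
    // plus the outcome asserted against the resulting TopologyDescription.
    type phase struct {
        Description       string              `json:"description,omitempty"`
        Responses         [][]json.RawMessage `json:"responses,omitempty"` // [address, hello response] pairs
        ApplicationErrors []json.RawMessage   `json:"applicationErrors,omitempty"`
        Outcome           json.RawMessage     `json:"outcome"`
    }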
- -A response is a pair of values: - -- The source, for example "a:27017". - This is the address the client sent the "hello" or legacy hello command to. -- A hello or legacy hello response, for example ``{ok: 1, helloOk: true, isWritablePrimary: true}``. - If the response includes an electionId it is shown in extended JSON like - ``{"$oid": "000000000000000000000002"}``. - The empty response `{}` indicates a network error - when attempting to call "hello" or legacy hello. - -An "applicationError" object has the following keys: - -- address: The source address, for example "a:27017". -- generation: (optional) The error's generation number, for example ``1``. - When absent this value defaults to the pool's current generation number. -- maxWireVersion: The ``maxWireVersion`` of the connection the error occurs - on, for example ``9``. Added to support testing the behavior of "not writable primary" - errors on <4.2 and >=4.2 servers. -- when: A string describing when this mock error should occur. Supported - values are: - - - "beforeHandshakeCompletes": Simulate this mock error as if it occurred - during a new connection's handshake for an application operation. - - "afterHandshakeCompletes": Simulate this mock error as if it occurred - on an established connection for an application operation (i.e. after - the connection pool check out succeeds). - -- type: The type of error to mock. Supported values are: - - - "command": A command error. Always accompanied with a "response". - - "network": A non-timeout network error. - - "timeout": A network timeout error. - -- response: (optional) A command error response, for example - ``{ok: 0, errmsg: "not primary"}``. Present if and only if ``type`` is - "command". Note the server only returns "not primary" if the "hello" command - has been run on this connection. Otherwise the legacy error message is returned. - -In non-monitoring tests, an "outcome" represents the correct -TopologyDescription that results from processing the responses in the phases -so far. It has the following keys: - -- topologyType: A string like "ReplicaSetNoPrimary". -- setName: A string with the expected replica set name, or null. -- servers: An object whose keys are addresses like "a:27017", and whose values - are "server" objects. -- logicalSessionTimeoutMinutes: null or an integer. -- maxSetVersion: absent or an integer. -- maxElectionId: absent or a BSON ObjectId. -- compatible: absent or a bool. - -A "server" object represents a correct ServerDescription within the client's -current TopologyDescription. It has the following keys: - -- type: A ServerType name, like "RSSecondary". -- setName: A string with the expected replica set name, or null. -- setVersion: absent or an integer. -- electionId: absent, null, or an ObjectId. -- logicalSessionTimeoutMinutes: absent, null, or an integer. -- minWireVersion: absent or an integer. -- maxWireVersion: absent or an integer. -- topologyVersion: absent, null, or a topologyVersion document. -- pool: (optional) A "pool" object. - -A "pool" object represents a correct connection pool for a given server. -It has the following keys: - -- generation: This server's expected pool generation, like ``0``. - -In monitoring tests, an "outcome" contains a list of SDAM events that should -have been published by the client as a result of processing hello or legacy hello -responses in the current phase. 
Any SDAM events published by the client during its -construction (that is, prior to processing any of the responses) should be -combined with the events published during processing of hello or legacy hello -responses of the first phase of the test. A test MAY explicitly verify events -published during client construction by providing an empty responses array for the -first phase. - - -Use as unittests ----------------- - -Mocking -~~~~~~~ - -Drivers should be able to test their server discovery and monitoring logic without -any network I/O, by parsing hello (or legacy hello) and application error from the -test file and passing them into the driver code. Parts of the client and -monitoring code may need to be mocked or subclassed to achieve this. -`A reference implementation for PyMongo 3.10.1 is available here -`_. - -Initialization -~~~~~~~~~~~~~~ - -For each file, create a fresh client object initialized with the file's "uri". - -All files in the "single" directory include a connection string with one host -and no "replicaSet" option. -Set the client's initial TopologyType to Single, however that is achieved using the client's API. -(The spec says "The user MUST be able to set the initial TopologyType to Single" -without specifying how.) - -All files in the "sharded" directory include a connection string with multiple hosts -and no "replicaSet" option. -Set the client's initial TopologyType to Unknown or Sharded, depending on the client's API. - -All files in the "rs" directory include a connection string with a "replicaSet" option. -Set the client's initial TopologyType to ReplicaSetNoPrimary. -(For most clients, parsing a connection string with a "replicaSet" option -automatically sets the TopologyType to ReplicaSetNoPrimary.) - -Set up a listener to collect SDAM events published by the client, including -events published during client construction. - -Test Phases -~~~~~~~~~~~ - -For each phase in the file: - -#. Parse the "responses" array. Pass in the responses in order to the driver - code. If a response is the empty object ``{}``, simulate a network error. - -#. Parse the "applicationErrors" array. For each element, simulate the given - error as if it occurred while running an application operation. Note that - it is sufficient to construct a mock error and call the procedure which - updates the topology, e.g. - ``topology.handleApplicationError(address, generation, maxWireVersion, error)``. - -For non-monitoring tests, -once all responses are processed, assert that the phase's "outcome" object -is equivalent to the driver's current TopologyDescription. - -For monitoring tests, once all responses are processed, assert that the -events collected so far by the SDAM event listener are equivalent to the -events specified in the phase. - -Some fields such as "logicalSessionTimeoutMinutes", "compatible", and -"topologyVersion" were added later and haven't been added to all test files. -If these fields are present, test that they are equivalent to the fields of -the driver's current TopologyDescription or ServerDescription. - -For monitoring tests, clear the list of events collected so far. - -Continue until all phases have been executed. - -Integration Tests ------------------ - -Integration tests are provided in the "integration" directory. - -Test Format -~~~~~~~~~~~ - -The same as the `Transactions Spec Test format -`_ with the following -additions: - -- The ``runOn`` requirement gains a new field: - - - ``authEnabled`` (optional): If True, skip this test if auth is not enabled. 
- If False, skip this test if auth is enabled. If this field is omitted, - this test can be run on clusters with or without auth. - -Special Test Operations -~~~~~~~~~~~~~~~~~~~~~~~ - -Certain operations that appear in the "operations" array do not correspond to -API methods but instead represent special test operations. Such operations are -defined on the "testRunner" object and are documented in the -`Transactions Spec Test -`_. - -Additional, SDAM test specific operations are documented here: - -configureFailPoint -'''''''''''''''''' - -The "configureFailPoint" operation instructs the test runner to configure -the given server failpoint on the "admin" database. The runner MUST disable -this failpoint at the end of the test. For example:: - - - name: configureFailPoint - object: testRunner - arguments: - failPoint: - configureFailPoint: failCommand - mode: { times: 1 } - data: - failCommands: ["insert"] - closeConnection: true - -Tests that use the "configureFailPoint" operation do not include -``configureFailPoint`` commands in their command expectations. Drivers MUST -ensure that ``configureFailPoint`` commands do not appear in the list of logged -commands, either by manually filtering it from the list of observed commands or -by using a different MongoClient to execute ``configureFailPoint``. - -Note, similar to the ``tests.failPoint`` field described in the `Transactions -Spec Test format `_ tests -with ``useMultipleMongoses: true`` will not contain a ``configureFailPoint`` -operation. - -wait -'''' - -The "wait" operation instructs the test runner to sleep for "ms" -milliseconds. For example:: - - - name: wait - object: testRunner - arguments: - ms: 1000 - -waitForEvent -'''''''''''' - -The "waitForEvent" operation instructs the test runner to wait until the test's -MongoClient has published a specific event a given number of times. For -example, the following instructs the test runner to wait for at least one -PoolClearedEvent to be published:: - - - name: waitForEvent - object: testRunner - arguments: - event: PoolClearedEvent - count: 1 - -Note that "count" includes events that were published while running previous -operations. - -If the "waitForEvent" operation is not satisfied after 10 seconds, the -operation is considered an error. - -ServerMarkedUnknownEvent -```````````````````````` - -The ServerMarkedUnknownEvent may appear as an event in `waitForEvent`_ and -`assertEventCount`_. This event is defined as ServerDescriptionChangedEvent -where newDescription.type is ``Unknown``. - -assertEventCount -'''''''''''''''' - -The "assertEventCount" operation instructs the test runner to assert the test's -MongoClient has published a specific event a given number of times. For -example, the following instructs the test runner to assert that a single -PoolClearedEvent was published:: - - - name: assertEventCount - object: testRunner - arguments: - event: PoolClearedEvent - count: 1 - -recordPrimary -''''''''''''' - -The "recordPrimary" operation instructs the test runner to record the current -primary of the test's MongoClient. For example:: - - - name: recordPrimary - object: testRunner - -runAdminCommand -''''''''''''''' - -The "runAdminCommand" operation instructs the test runner to run the given -command on the admin database. Drivers MUST run this command on a different -MongoClient from the one used for test operations. 
For example:: - - - name: runAdminCommand - object: testRunner - command_name: replSetFreeze - arguments: - command: - replSetFreeze: 0 - readPreference: - mode: Secondary - -waitForPrimaryChange -'''''''''''''''''''' - -The "waitForPrimaryChange" operation instructs the test runner to wait up to -"timeoutMS" milliseconds for the MongoClient to discover a new primary server. -The new primary should be different from the one recorded by "recordPrimary". -For example:: - - - name: waitForPrimaryChange - object: testRunner - arguments: - timeoutMS: 15000 - -To implement, Drivers can subscribe to ServerDescriptionChangedEvents and wait -for an event where newDescription.type is ``RSPrimary`` and the address is -different from the one previously recorded by "recordPrimary". - -startThread -''''''''''' - -The "startThread" operation instructs the test runner to start a new thread -with the provided "name". The `runOnThread`_ and `waitForThread`_ operations -reference a thread by its "name". For example:: - - - name: startThread - object: testRunner - arguments: - name: thread1 - -runOnThread -''''''''''' - -The "runOnThread" operation instructs the test runner to schedule an operation -to be run on the given thread. runOnThread MUST NOT wait for the scheduled -operation to complete. For example:: - - - name: runOnThread - object: testRunner - arguments: - name: thread1 - operation: - name: insertOne - object: collection - arguments: - document: - _id: 2 - error: true - -waitForThread -''''''''''''' - -The "waitForThread" operation instructs the test runner to stop the given -thread, wait for it to complete, and assert that the thread exited without -any errors. For example:: - - - name: waitForThread - object: testRunner - arguments: - name: thread1 - -Prose Tests ------------ - -The following prose tests cannot be represented as spec tests and MUST be -implemented. - -Streaming protocol Tests -~~~~~~~~~~~~~~~~~~~~~~~~ - -Drivers that implement the streaming protocol (multi-threaded or -asynchronous drivers) must implement the following tests. Each test should be -run against a standalone, replica set, and sharded cluster unless otherwise -noted. - -Some of these cases should already be tested with the old protocol; in -that case just verify the test cases succeed with the new protocol. - -1. Configure the client with heartbeatFrequencyMS set to 500, - overriding the default of 10000. Assert the client processes - hello and legacy hello replies more frequently (approximately every 500ms). - -RTT Tests -~~~~~~~~~ - -Run the following test(s) on MongoDB 4.4+. - -1. Test that RTT is continuously updated. - - #. Create a client with ``heartbeatFrequencyMS=500``, - ``appName=streamingRttTest``, and subscribe to server events. - - #. Run a find command to wait for the server to be discovered. - - #. Sleep for 2 seconds. This must be long enough for multiple heartbeats - to succeed. - - #. Assert that each ``ServerDescriptionChangedEvent`` includes a non-zero - RTT. - - #. Configure the following failpoint to block hello or legacy hello commands - for 250ms which should add extra latency to each RTT check:: - - db.adminCommand({ - configureFailPoint: "failCommand", - mode: {times: 1000}, - data: { - failCommands: ["hello"], // or the legacy hello command - blockConnection: true, - blockTimeMS: 500, - appName: "streamingRttTest", - }, - }); - - #. Wait for the server's RTT to exceed 250ms. Eventually the average RTT - should also exceed 500ms but we use 250ms to speed up the test. 
Note - that the `Server Description Equality`_ rule means that - ServerDescriptionChangedEvents will not be published. This test may - need to use a driver specific helper to obtain the latest RTT instead. - If the RTT does not exceed 250ms after 10 seconds, consider the test - failed. - - #. Disable the failpoint:: - - db.adminCommand({ - configureFailPoint: "failCommand", - mode: "off", - }); - -.. Section for links. - -.. _Server Description Equality: /source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#server-description-equality diff --git a/testdata/server-discovery-and-monitoring/monitoring/README.rst b/testdata/server-discovery-and-monitoring/monitoring/README.rst deleted file mode 100644 index 7c741544ec..0000000000 --- a/testdata/server-discovery-and-monitoring/monitoring/README.rst +++ /dev/null @@ -1,12 +0,0 @@ -===================== -SDAM Monitoring Tests -===================== - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the SDAM Monitoring spec. - -Format ------- - -The format of the tests follows the standard SDAM test and should be able to leverage -the existing test runner in each language for the SDAM tests. diff --git a/testdata/server-selection/README.rst b/testdata/server-selection/README.rst deleted file mode 100644 index 62a6abce37..0000000000 --- a/testdata/server-selection/README.rst +++ /dev/null @@ -1,73 +0,0 @@ -====================== -Server Selection Tests -====================== - -This directory contains platform-independent tests that drivers can use -to prove their conformance to the Server Selection spec. The tests -are provided in both YAML and JSON formats, and drivers may test against -whichever format is more convenient for them. - -Version -------- - -Specifications have no version scheme. -They are not tied to a MongoDB server version, -and it is our intention that each specification moves from "draft" to "final" -with no further versions; it is superseded by a future spec, not revised. - -However, implementers must have stable sets of tests to target. -As test files evolve they will be occasionally tagged like -"server-selection-tests-2015-01-04", until the spec is final. - -Test Format and Use -------------------- - -There are two types of tests for the server selection spec, tests for -round trip time (RTT) calculation, and tests for server selection logic. - -Drivers should be able to test their server selection logic -without any network I/O, by parsing topology descriptions and read preference -documents from the test files and passing them into driver code. Parts of the -server selection code may need to be mocked or subclassed to achieve this. - -RTT Calculation Tests ->>>>>>>>>>>>>>>>>>>>> - -These YAML files contain the following keys: - -- ``avg_rtt_ms``: a server's previous average RTT, in milliseconds -- ``new_rtt_ms``: a new RTT value for this server, in milliseconds -- ``new_avg_rtt``: this server's newly-calculated average RTT, in milliseconds - -For each file, create a server description object initialized with ``avg_rtt_ms``. -Parse ``new_rtt_ms``, and ensure that the new RTT value for the mocked server -description is equal to ``new_avg_rtt``. - -If driver architecture doesn't easily allow construction of server description -objects in isolation, unit testing the EWMA algorithm using these inputs -and expected outputs is acceptable. 
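As a concrete illustration of the EWMA update these files exercise, here is a minimal Go sketch. It assumes the 0.2/0.8 weighting recommended by the Server Selection spec; the package and function names are illustrative, not a driver API::

    package serverselection

    // updateAverageRTT folds a new sample (new_rtt_ms) into a server's previous
    // average (avg_rtt_ms); the result is what a test compares against new_avg_rtt.
    // A server with no previous average simply adopts the first sample.
    func updateAverageRTT(avgRTTMS, newRTTMS float64, hasPrevious bool) float64 {
        const alpha = 0.2 // weight given to the newest sample
        if !hasPrevious {
            return newRTTMS
        }
        return alpha*newRTTMS + (1-alpha)*avgRTTMS
    }

For example, avg_rtt_ms = 10 with new_rtt_ms = 20 yields new_avg_rtt = 12 under this weighting.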
- -Server Selection Logic Tests ->>>>>>>>>>>>>>>>>>>>>>>>>>>> - -These YAML files contain the following setup for each test: - -- ``topology_description``: the state of a mocked cluster -- ``operation``: the kind of operation to perform, either read or write -- ``read_preference``: a read preference document - -For each file, create a new TopologyDescription object initialized with the values -from ``topology_description``. Create a ReadPreference object initialized with the -values from ``read_preference``. - -Together with "operation", pass the newly-created TopologyDescription and ReadPreference -to server selection, and ensure that it selects the correct subset of servers from -the TopologyDescription. Each YAML file contains a key for these stages of server selection: - -- ``suitable_servers``: the set of servers in topology_description that are suitable, as - per the Server Selection spec, given operation and read_preference -- ``in_latency_window``: the set of suitable_servers that fall within the latency window - -Drivers implementing server selection MUST test that their implementation -correctly returns the set of servers in ``in_latency_window``. Drivers SHOULD also test -against ``suitable_servers`` if possible. diff --git a/testdata/sessions/README.rst b/testdata/sessions/README.rst deleted file mode 100644 index db90ce84af..0000000000 --- a/testdata/sessions/README.rst +++ /dev/null @@ -1,244 +0,0 @@ -==================== -Driver Session Tests -==================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in this directory are platform-independent tests -meant to exercise a driver's implementation of sessions. These tests utilize the -`Unified Test Format <../../unified-test-format/unified-test-format.rst>`__. - -Several prose tests, which are not easily expressed in YAML, are also presented -in the Driver Sessions Spec. Those tests will need to be manually implemented -by each driver. - -Snapshot session tests -====================== -Snapshot sessions tests require server of version 5.0 or higher and -replica set or a sharded cluster deployment. -Default snapshot history window on the server is 5 minutes. Running the test in debug mode, or in any other slow configuration -may lead to `SnapshotTooOld` errors. Drivers can work around this issue by increasing the server's `minSnapshotHistoryWindowInSeconds` parameter, for example: - -.. code:: python - - client.admin.command('setParameter', 1, minSnapshotHistoryWindowInSeconds=60) - -Prose tests -``````````` - -1. Setting both ``snapshot`` and ``causalConsistency`` to true is not allowed -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* ``client.startSession(snapshot = true, causalConsistency = true)`` -* Assert that an error was raised by driver - -2. Pool is LIFO -~~~~~~~~~~~~~~~ - -This test applies to drivers with session pools. - -* Call ``MongoClient.startSession`` twice to create two sessions, let us call them ``A`` and ``B``. -* Call ``A.endSession``, then ``B.endSession``. -* Call ``MongoClient.startSession``: the resulting session must have the same session ID as ``B``. -* Call ``MongoClient.startSession`` again: the resulting session must have the same session ID as ``A``. - -3. ``$clusterTime`` in commands -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Turn ``heartbeatFrequencyMS`` up to a very large number. -* Register a command-started and a command-succeeded APM listener. 
If the driver has no APM support, inspect commands/replies in another idiomatic way, such as monkey-patching or a mock server. -* Send a ``ping`` command to the server with the generic ``runCommand`` method. -* Assert that the command passed to the command-started listener includes ``$clusterTime`` if and only if ``maxWireVersion`` >= 6. -* Record the ``$clusterTime``, if any, in the reply passed to the command-succeeded APM listener. -* Send another ``ping`` command. -* Assert that ``$clusterTime`` in the command passed to the command-started listener, if any, equals the ``$clusterTime`` in the previous server reply. (Turning ``heartbeatFrequencyMS`` up prevents an intervening heartbeat from advancing the ``$clusterTime`` between these final two steps.) - -Repeat the above for: - -* An aggregate command from the ``aggregate`` helper method -* A find command from the ``find`` helper method -* An insert command from the ``insert_one`` helper method - -4. Explicit and implicit session arguments -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Register a command-started APM listener. If the driver has no APM support, inspect commands in another idiomatic way, such as monkey-patching or a mock server. -* Create ``client1`` -* Get ``database`` from ``client1`` -* Get ``collection`` from ``database`` -* Start ``session`` from ``client1`` -* Call ``collection.insertOne(session,...)`` -* Assert that the command passed to the command-started listener contained the session ``lsid`` from ``session``. -* Call ``collection.insertOne(,...)`` (*without* a session argument) -* Assert that the command passed to the command-started listener contained a session ``lsid``. - -Repeat the above for all methods that take a session parameter. - -5. Session argument is for the right client -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Create ``client1`` and ``client2`` -* Get ``database`` from ``client1`` -* Get ``collection`` from ``database`` -* Start ``session`` from ``client2`` -* Call ``collection.insertOne(session,...)`` -* Assert that an error was reported because ``session`` was not started from ``client1`` - -Repeat the above for all methods that take a session parameter. - -6. No further operations can be performed using a session after ``endSession`` has been called -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Start a ``session`` -* End the ``session`` -* Call ``collection.InsertOne(session, ...)`` -* Assert that the proper error was reported - -Repeat the above for all methods that take a session parameter. - -If your driver implements a platform dependent idiomatic disposal pattern, test -that also (if the idiomatic disposal pattern calls ``endSession`` it would be -sufficient to only test the disposal pattern since that ends up calling -``endSession``). - -7. Authenticating as multiple users suppresses implicit sessions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Skip this test if your driver does not allow simultaneous authentication with multiple users. - -* Authenticate as two users -* Call ``findOne`` with no explicit session -* Capture the command sent to the server -* Assert that the command sent to the server does not have an ``lsid`` field - -8. 
Client-side cursor that exhausts the results on the initial query immediately returns the implicit session to the pool -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Insert two documents into a collection -* Execute a find operation on the collection and iterate past the first document -* Assert that the implicit session is returned to the pool. This can be done in several ways: - - * Track in-use count in the server session pool and assert that the count has dropped to zero - * Track the lsid used for the find operation (e.g. with APM) and then do another operation and - assert that the same lsid is used as for the find operation. - -9. Client-side cursor that exhausts the results after a ``getMore`` immediately returns the implicit session to the pool -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Insert five documents into a collection -* Execute a find operation on the collection with batch size of 3 -* Iterate past the first four documents, forcing the final ``getMore`` operation -* Assert that the implicit session is returned to the pool prior to iterating past the last document - -10. No remaining sessions are checked out after each functional test -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -At the end of every individual functional test of the driver, there SHOULD be an -assertion that there are no remaining sessions checked out from the pool. This -may require changes to existing tests to ensure that they close any explicit -client sessions and any unexhausted cursors. - -11. For every combination of topology and readPreference, ensure that ``find`` and ``getMore`` both send the same session id -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Insert three documents into a collection -* Execute a ``find`` operation on the collection with a batch size of 2 -* Assert that the server receives a non-zero lsid -* Iterate through enough documents (3) to force a ``getMore`` -* Assert that the server receives a non-zero lsid equal to the lsid that ``find`` sent. - -12. Session pool can be cleared after forking without calling ``endSession`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Skip this test if your driver does not allow forking. - -* Create ClientSession -* Record its lsid -* Delete it (so the lsid is pushed into the pool) -* Fork -* In the parent, create a ClientSession and assert its lsid is the same. -* In the child, create a ClientSession and assert its lsid is different. - -13. Existing sessions are not checked into a cleared pool after forking -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Skip this test if your driver does not allow forking. - -* Create ClientSession -* Record its lsid -* Fork -* In the parent, return the ClientSession to the pool, create a new ClientSession, and assert its lsid is the same. -* In the child, return the ClientSession to the pool, create a new ClientSession, and assert its lsid is different. - -14. Implicit sessions only allocate their server session after a successful connection checkout -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Create a MongoClient with the following options: ``maxPoolSize=1`` and ``retryWrites=true``. 
If testing against a sharded deployment, the test runner MUST ensure that the MongoClient connects to only a single mongos host. -* Attach a command started listener that collects each command's lsid -* Initiate the following concurrent operations - - * ``insertOne({ }),`` - * ``deleteOne({ }),`` - * ``updateOne({ }, { $set: { a: 1 } }),`` - * ``bulkWrite([{ updateOne: { filter: { }, update: { $set: { a: 1 } } } }]),`` - * ``findOneAndDelete({ }),`` - * ``findOneAndUpdate({ }, { $set: { a: 1 } }),`` - * ``findOneAndReplace({ }, { a: 1 }),`` - * ``find().toArray()`` - -* Wait for all operations to complete successfully -* Assert the following across at least 5 retries of the above test: - - * Drivers MUST assert that exactly one session is used for all operations at - least once across the retries of this test. - * Note that it's possible, although rare, for >1 server session to be used - because the session is not released until after the connection is checked in. - * Drivers MUST assert that the number of allocated sessions is strictly less - than the number of concurrent operations in every retry of this test. In - this instance it would be less than (but NOT equal to) 8. - -15. ``lsid`` is added inside ``$query`` when using OP_QUERY -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This test only applies to drivers that have not implemented OP_MSG and still use OP_QUERY. - -* For a command to a mongos that includes a readPreference, verify that the - ``lsid`` on query commands is added inside the ``$query`` field, and NOT as a - top-level field. - -16. Authenticating as a second user after starting a session results in a server error -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This test only applies to drivers that allow authentication to be changed on the fly. - -* Authenticate as the first user -* Start a session by calling ``startSession`` -* Authenticate as a second user -* Call ``findOne`` using the session as an explicit session -* Assert that the driver returned an error because multiple users are authenticated - -17. Driver verifies that the session is owned by the current user -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This test only applies to drivers that allow authentication to be changed on the fly. - -* Authenticate as user A -* Start a session by calling ``startSession`` -* Logout user A -* Authenticate as user B -* Call ``findOne`` using the session as an explicit session -* Assert that the driver returned an error because the session is owned by a different user - -Changelog -========= - -:2019-05-15: Initial version. -:2021-06-15: Added snapshot-session tests. Introduced legacy and unified folders. -:2021-07-30: Use numbering for prose test -:2022-02-11: Convert legacy tests to unified format -:2022-06-13: Relocate prose test from spec document and apply new ordering diff --git a/testdata/transactions/README.rst b/testdata/transactions/README.rst deleted file mode 100644 index 496a6b2ef8..0000000000 --- a/testdata/transactions/README.rst +++ /dev/null @@ -1,663 +0,0 @@ -================== -Transactions Tests -================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in the ``legacy`` and ``unified`` sub-directories are -platform-independent tests that drivers can use to prove their conformance to -the Transactions Spec. 
The tests in the ``legacy`` directory are designed with -the intention of sharing some test-runner code with the CRUD Spec tests and the -Command Monitoring Spec tests. The format for these tests and instructions for -executing them are provided in the following sections. Tests in the -``unified`` directory are written using the `Unified Test Format -<../../unified-test-format/unified-test-format.rst>`_. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Server Fail Point -================= - -failCommand -``````````` - -Some tests depend on a server fail point, expressed in the ``failPoint`` field. -For example the ``failCommand`` fail point allows the client to force the -server to return an error. Keep in mind that the fail point only triggers for -commands listed in the "failCommands" field. See `SERVER-35004`_ and -`SERVER-35083`_ for more information. - -.. _SERVER-35004: https://jira.mongodb.org/browse/SERVER-35004 -.. _SERVER-35083: https://jira.mongodb.org/browse/SERVER-35083 - -The ``failCommand`` fail point may be configured like so:: - - db.adminCommand({ - configureFailPoint: "failCommand", - mode: , - data: { - failCommands: ["commandName", "commandName2"], - closeConnection: , - errorCode: , - writeConcernError: , - appName: , - blockConnection: , - blockTimeMS: , - } - }); - -``mode`` is a generic fail point option and may be assigned a string or document -value. The string values ``"alwaysOn"`` and ``"off"`` may be used to enable or -disable the fail point, respectively. A document may be used to specify either -``times`` or ``skip``, which are mutually exclusive: - -- ``{ times: }`` may be used to limit the number of times the fail - point may trigger before transitioning to ``"off"``. -- ``{ skip: }`` may be used to defer the first trigger of a fail - point, after which it will transition to ``"alwaysOn"``. - -The ``data`` option is a document that may be used to specify options that -control the fail point's behavior. ``failCommand`` supports the following -``data`` options, which may be combined if desired: - -- ``failCommands``: Required, the list of command names to fail. -- ``closeConnection``: Boolean option, which defaults to ``false``. If - ``true``, the command will not be executed, the connection will be closed, and - the client will see a network error. -- ``errorCode``: Integer option, which is unset by default. If set, the command - will not be executed and the specified command error code will be returned as - a command error. -- ``appName``: A string to filter which MongoClient should be affected by - the failpoint. `New in mongod 4.4.0-rc2 `_. -- ``blockConnection``: Whether the server should block the affected commands. - Default false. -- ``blockTimeMS``: The number of milliseconds the affect commands should be - blocked for. Required when blockConnection is true. - `New in mongod 4.3.4 `_. - -Speeding Up Tests -================= - -See `Speeding Up Tests <../../retryable-reads/tests/README.rst#speeding-up-tests>`_ in the retryable reads spec tests. - -Test Format -=========== - -Each YAML file has the following keys: - -- ``runOn`` (optional): An array of server version and/or topology requirements - for which the tests can be run. If the test environment satisfies one or more - of these requirements, the tests may be executed; otherwise, this file should - be skipped. 
If this field is omitted, the tests can be assumed to have no - particular requirements and should be executed. Each element will have some or - all of the following fields: - - - ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the tests. If this field is omitted, it should - be assumed that there is no lower bound on the required server version. - - - ``maxServerVersion`` (optional): The maximum server version (inclusive) - against which the tests can be run successfully. If this field is omitted, - it should be assumed that there is no upper bound on the required server - version. - - - ``topology`` (optional): An array of server topologies against which the - tests can be run successfully. Valid topologies are "single", "replicaset", - and "sharded". If this field is omitted, the default is all topologies (i.e. - ``["single", "replicaset", "sharded"]``). - - - ``serverless``: Optional string. Whether or not the test should be run on - serverless instances imitating sharded clusters. Valid values are "require", - "forbid", and "allow". If "require", the test MUST only be run on serverless - instances. If "forbid", the test MUST NOT be run on serverless instances. If - omitted or "allow", this option has no effect. - - The test runner MUST be informed whether or not serverless is being used in - order to determine if this requirement is met (e.g. through an environment - variable or configuration option). Since the serverless proxy imitates a - mongos, the runner is not capable of determining this by issuing a server - command such as ``buildInfo`` or ``hello``. - -- ``database_name`` and ``collection_name``: The database and collection to use - for testing. - -- ``data``: The data that should exist in the collection under test before each - test run. - -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some or all of the following fields: - - - ``description``: The name of the test. - - - ``skipReason``: Optional, string describing why this test should be - skipped. - - - ``useMultipleMongoses`` (optional): If ``true``, the MongoClient for this - test should be initialized with multiple mongos seed addresses. If ``false`` - or omitted, only a single mongos address should be specified. This field has - no effect for non-sharded topologies. - - - ``clientOptions``: Optional, parameters to pass to MongoClient(). - - - ``failPoint``: Optional, a server failpoint to enable expressed as the - configureFailPoint command to run on the admin database. This option and - ``useMultipleMongoses: true`` are mutually exclusive. - - - ``sessionOptions``: Optional, map of session names (e.g. "session0") to - parameters to pass to MongoClient.startSession() when creating that session. - - - ``operations``: Array of documents, each describing an operation to be - executed. Each document has the following fields: - - - ``name``: The name of the operation on ``object``. - - - ``object``: The name of the object to perform the operation on. Can be - "database", "collection", "session0", "session1", or "testRunner". See - the "targetedFailPoint" operation in `Special Test Operations`_. - - - ``collectionOptions``: Optional, parameters to pass to the Collection() - used for this operation. - - - ``databaseOptions``: Optional, parameters to pass to the Database() - used for this operation. - - - ``command_name``: Present only when ``name`` is "runCommand". The name - of the command to run. 
Required for languages that are unable preserve - the order keys in the "command" argument when parsing JSON/YAML. - - - ``arguments``: Optional, the names and values of arguments. - - - ``error``: Optional. If true, the test should expect an error or - exception. This could be a server-generated or a driver-generated error. - - - ``result``: The return value from the operation, if any. This field may - be a single document or an array of documents in the case of a - multi-document read. If the operation is expected to return an error, the - ``result`` is a single document that has one or more of the following - fields: - - - ``errorContains``: A substring of the expected error message. - - - ``errorCodeName``: The expected "codeName" field in the server - error response. - - - ``errorLabelsContain``: A list of error label strings that the - error is expected to have. - - - ``errorLabelsOmit``: A list of error label strings that the - error is expected not to have. - - - ``expectations``: Optional list of command-started events. - - - ``outcome``: Document describing the return value and/or expected state of - the collection after the operation is executed. Contains the following - fields: - - - ``collection``: - - - ``data``: The data that should exist in the collection after the - operations have run, sorted by "_id". - -Use as Integration Tests -======================== - -Run a MongoDB replica set with a primary, a secondary, and an arbiter, -**server version 4.0.0 or later**. (Including a secondary ensures that -server selection in a transaction works properly. Including an arbiter helps -ensure that no new bugs have been introduced related to arbiters.) - -A driver that implements support for sharded transactions MUST also run these -tests against a MongoDB sharded cluster with multiple mongoses and -**server version 4.2 or later**. Some tests require -initializing the MongoClient with multiple mongos seeds to ensures that mongos -transaction pinning and the recoveryToken works properly. - -Load each YAML (or JSON) file using a Canonical Extended JSON parser. - -Then for each element in ``tests``: - -#. If the ``skipReason`` field is present, skip this test completely. -#. Create a MongoClient and call - ``client.admin.runCommand({killAllSessions: []})`` to clean up any open - transactions from previous test failures. Ignore a command failure with - error code 11601 ("Interrupted") to work around `SERVER-38335`_. - - - Running ``killAllSessions`` cleans up any open transactions from - a previously failed test to prevent the current test from blocking. - It is sufficient to run this command once before starting the test suite - and once after each failed test. - - When testing against a sharded cluster run this command on ALL mongoses. - -#. Create a collection object from the MongoClient, using the ``database_name`` - and ``collection_name`` fields of the YAML file. -#. Drop the test collection, using writeConcern "majority". -#. Execute the "create" command to recreate the collection, using writeConcern - "majority". (Creating the collection inside a transaction is prohibited, so - create it explicitly.) -#. If the YAML file contains a ``data`` array, insert the documents in ``data`` - into the test collection, using writeConcern "majority". -#. When testing against a sharded cluster run a ``distinct`` command on the - newly created collection on all mongoses. For an explanation see, - `Why do tests that run distinct sometimes fail with StaleDbVersion?`_ -#. 
If ``failPoint`` is specified, its value is a configureFailPoint command. - Run the command on the admin database to enable the fail point. -#. Create a **new** MongoClient ``client``, with Command Monitoring listeners - enabled. (Using a new MongoClient for each test ensures a fresh session pool - that hasn't executed any transactions previously, so the tests can assert - actual txnNumbers, starting from 1.) Pass this test's ``clientOptions`` if - present. - - - When testing against a sharded cluster and ``useMultipleMongoses`` is - ``true`` the client MUST be created with multiple (valid) mongos seed - addresses. - -#. Call ``client.startSession`` twice to create ClientSession objects - ``session0`` and ``session1``, using the test's "sessionOptions" if they - are present. Save their lsids so they are available after calling - ``endSession``, see `Logical Session Id`_. -#. For each element in ``operations``: - - - If the operation ``name`` is a special test operation type, execute it and - go to the next operation, otherwise proceed to the next step. - - Enter a "try" block or your programming language's closest equivalent. - - Create a Database object from the MongoClient, using the ``database_name`` - field at the top level of the test file. - - Create a Collection object from the Database, using the - ``collection_name`` field at the top level of the test file. - If ``collectionOptions`` or ``databaseOptions`` is present, create the - Collection or Database object with the provided options, respectively. - Otherwise create the object with the default options. - - Execute the named method on the provided ``object``, passing the - arguments listed. Pass ``session0`` or ``session1`` to the method, - depending on which session's name is in the arguments list. - If ``arguments`` contains no "session", pass no explicit session to the - method. - - If the driver throws an exception / returns an error while executing this - series of operations, store the error message and server error code. - - If the operation's ``error`` field is ``true``, verify that the method - threw an exception or returned an error. - - If the result document has an "errorContains" field, verify that the - method threw an exception or returned an error, and that the value of the - "errorContains" field matches the error string. "errorContains" is a - substring (case-insensitive) of the actual error message. - - If the result document has an "errorCodeName" field, verify that the - method threw a command failed exception or returned an error, and that - the value of the "errorCodeName" field matches the "codeName" in the - server error response. - - If the result document has an "errorLabelsContain" field, verify that the - method threw an exception or returned an error. Verify that all of the - error labels in "errorLabelsContain" are present in the error or exception - using the ``hasErrorLabel`` method. - - If the result document has an "errorLabelsOmit" field, verify that the - method threw an exception or returned an error. Verify that none of the - error labels in "errorLabelsOmit" are present in the error or exception - using the ``hasErrorLabel`` method. - - If the operation returns a raw command response, eg from ``runCommand``, - then compare only the fields present in the expected result document. - Otherwise, compare the method's return value to ``result`` using the same - logic as the CRUD Spec Tests runner. - -#. Call ``session0.endSession()`` and ``session1.endSession``. -#. 
If the test includes a list of command-started events in ``expectations``, - compare them to the actual command-started events using the - same logic as the Command Monitoring Spec Tests runner, plus the rules in - the Command-Started Events instructions below. -#. If ``failPoint`` is specified, disable the fail point to avoid spurious - failures in subsequent tests. The fail point may be disabled like so:: - - db.adminCommand({ - configureFailPoint: , - mode: "off" - }); - -#. For each element in ``outcome``: - - - If ``name`` is "collection", verify that the test collection contains - exactly the documents in the ``data`` array. Ensure this find reads the - latest data by using **primary read preference** with - **local read concern** even when the MongoClient is configured with - another read preference or read concern. - Note the server does not guarantee that documents returned by a find - command will be in inserted order. This find MUST sort by ``{_id:1}``. - -.. _SERVER-38335: https://jira.mongodb.org/browse/SERVER-38335 - -Special Test Operations -``````````````````````` - -Certain operations that appear in the "operations" array do not correspond to -API methods but instead represent special test operations. Such operations are -defined on the "testRunner" object and documented here: - -targetedFailPoint -~~~~~~~~~~~~~~~~~ - -The "targetedFailPoint" operation instructs the test runner to configure a fail -point on a specific mongos. The mongos to run the ``configureFailPoint`` is -determined by the "session" argument (either "session0" or "session1"). -The session must already be pinned to a mongos server. The "failPoint" argument -is the ``configureFailPoint`` command to run. - -If a test uses ``targetedFailPoint``, disable the fail point after running -all ``operations`` to avoid spurious failures in subsequent tests. The fail -point may be disabled like so:: - - db.adminCommand({ - configureFailPoint: , - mode: "off" - }); - -Here is an example which instructs the test runner to enable the failCommand -fail point on the mongos server which "session0" is pinned to:: - - # Enable the fail point only on the Mongos that session0 is pinned to. - - name: targetedFailPoint - object: testRunner - arguments: - session: session0 - failPoint: - configureFailPoint: failCommand - mode: { times: 1 } - data: - failCommands: ["commitTransaction"] - closeConnection: true - -Tests that use the "targetedFailPoint" operation do not include -``configureFailPoint`` commands in their command expectations. Drivers MUST -ensure that ``configureFailPoint`` commands do not appear in the list of logged -commands, either by manually filtering it from the list of observed commands or -by using a different MongoClient to execute ``configureFailPoint``. - -assertSessionTransactionState -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The "assertSessionTransactionState" operation instructs the test runner to -assert that the transaction state of the given session is equal to the -specified value. 
The possible values are as follows: ``none``, ``starting``, -``in_progress``, ``committed``, ``aborted``:: - - - name: assertSessionTransactionState - object: testRunner - arguments: - session: session0 - state: in_progress - -assertSessionPinned -~~~~~~~~~~~~~~~~~~~ - -The "assertSessionPinned" operation instructs the test runner to assert that -the given session is pinned to a mongos:: - - - name: assertSessionPinned - object: testRunner - arguments: - session: session0 - -assertSessionUnpinned -~~~~~~~~~~~~~~~~~~~~~ - -The "assertSessionUnpinned" operation instructs the test runner to assert that -the given session is not pinned to a mongos:: - - - name: assertSessionPinned - object: testRunner - arguments: - session: session0 - -assertCollectionExists -~~~~~~~~~~~~~~~~~~~~~~ - -The "assertCollectionExists" operation instructs the test runner to assert that -the given collection exists in the database:: - - - name: assertCollectionExists - object: testRunner - arguments: - database: db - collection: test - -Use a ``listCollections`` command to check whether the collection exists. Note -that it is currently not possible to run ``listCollections`` from within a -transaction. - -assertCollectionNotExists -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The "assertCollectionNotExists" operation instructs the test runner to assert -that the given collection does not exist in the database:: - - - name: assertCollectionNotExists - object: testRunner - arguments: - database: db - collection: test - -Use a ``listCollections`` command to check whether the collection exists. Note -that it is currently not possible to run ``listCollections`` from within a -transaction. - -assertIndexExists -~~~~~~~~~~~~~~~~~ - -The "assertIndexExists" operation instructs the test runner to assert that the -index with the given name exists on the collection:: - - - name: assertIndexExists - object: testRunner - arguments: - database: db - collection: test - index: t_1 - -Use a ``listIndexes`` command to check whether the index exists. Note that it is -currently not possible to run ``listIndexes`` from within a transaction. - -assertIndexNotExists -~~~~~~~~~~~~~~~~~~~~ - -The "assertIndexNotExists" operation instructs the test runner to assert that -the index with the given name does not exist on the collection:: - - - name: assertIndexNotExists - object: testRunner - arguments: - database: db - collection: test - index: t_1 - -Use a ``listIndexes`` command to check whether the index exists. Note that it is -currently not possible to run ``listIndexes`` from within a transaction. - -Command-Started Events -`````````````````````` - -The event listener used for these tests MUST ignore the security commands -listed in the Command Monitoring Spec. - -Logical Session Id -~~~~~~~~~~~~~~~~~~ - -Each command-started event in ``expectations`` includes an ``lsid`` with the -value "session0" or "session1". Tests MUST assert that the command's actual -``lsid`` matches the id of the correct ClientSession named ``session0`` or -``session1``. - -Null Values -~~~~~~~~~~~ - -Some command-started events in ``expectations`` include ``null`` values for -fields such as ``txnNumber``, ``autocommit``, and ``writeConcern``. -Tests MUST assert that the actual command **omits** any field that has a -``null`` value in the expected command. - -Cursor Id -^^^^^^^^^ - -A ``getMore`` value of ``"42"`` in a command-started event is a fake cursorId -that MUST be ignored. 
(In the Command Monitoring Spec tests, fake cursorIds are -correlated with real ones, but that is not necessary for Transactions Spec -tests.) - -afterClusterTime -^^^^^^^^^^^^^^^^ - -A ``readConcern.afterClusterTime`` value of ``42`` in a command-started event -is a fake cluster time. Drivers MUST assert that the actual command includes an -afterClusterTime. - -recoveryToken -^^^^^^^^^^^^^ - -A ``recoveryToken`` value of ``42`` in a command-started event is a -placeholder for an arbitrary recovery token. Drivers MUST assert that the -actual command includes a "recoveryToken" field and SHOULD assert that field -is a BSON document. - -Mongos Pinning Prose Tests -========================== - -The following tests ensure that a ClientSession is properly unpinned after -a sharded transaction. Initialize these tests with a MongoClient connected -to multiple mongoses. - -These tests use a cursor's address field to track which server an operation -was run on. If this is not possible in your driver, use command monitoring -instead. - -#. Test that starting a new transaction on a pinned ClientSession unpins the - session and normal server selection is performed for the next operation. - - .. code:: python - - @require_server_version(4, 1, 6) - @require_mongos_count_at_least(2) - def test_unpin_for_next_transaction(self): - # Increase localThresholdMS and wait until both nodes are discovered - # to avoid false positives. - client = MongoClient(mongos_hosts, localThresholdMS=1000) - wait_until(lambda: len(client.nodes) > 1) - # Create the collection. - client.test.test.insert_one({}) - with client.start_session() as s: - # Session is pinned to Mongos. - with s.start_transaction(): - client.test.test.insert_one({}, session=s) - - addresses = set() - for _ in range(50): - with s.start_transaction(): - cursor = client.test.test.find({}, session=s) - assert next(cursor) - addresses.add(cursor.address) - - assert len(addresses) > 1 - -#. Test non-transaction operations using a pinned ClientSession unpins the - session and normal server selection is performed. - - .. code:: python - - @require_server_version(4, 1, 6) - @require_mongos_count_at_least(2) - def test_unpin_for_non_transaction_operation(self): - # Increase localThresholdMS and wait until both nodes are discovered - # to avoid false positives. - client = MongoClient(mongos_hosts, localThresholdMS=1000) - wait_until(lambda: len(client.nodes) > 1) - # Create the collection. - client.test.test.insert_one({}) - with client.start_session() as s: - # Session is pinned to Mongos. - with s.start_transaction(): - client.test.test.insert_one({}, session=s) - - addresses = set() - for _ in range(50): - cursor = client.test.test.find({}, session=s) - assert next(cursor) - addresses.add(cursor.address) - - assert len(addresses) > 1 - -Q & A -===== - -Why do some tests appear to hang for 60 seconds on a sharded cluster? -````````````````````````````````````````````````````````````````````` - -There are two cases where this can happen. When the initial commitTransaction -attempt fails on mongos A and is retried on mongos B, mongos B will block -waiting for the transaction to complete. However because the initial commit -attempt failed, the command will only complete after the transaction is -automatically aborted for exceeding the shard's -transactionLifetimeLimitSeconds setting. `SERVER-39726`_ requests that -recovering the outcome of an uncommitted transaction should immediately abort -the transaction. 
- -The second case is when a *single-shard* transaction is committed successfully -on mongos A and then explicitly committed again on mongos B. Mongos B will also -block until the transactionLifetimeLimitSeconds timeout is hit at which point -``{ok:1}`` will be returned. `SERVER-39349`_ requests that recovering the -outcome of a completed single-shard transaction should not block. -Note that this test suite only includes single shard transactions. - -To workaround these issues, drivers SHOULD decrease the transaction timeout -setting by running setParameter **on each shard**. Setting the timeout to 3 -seconds significantly speeds up the test suite without a high risk of -prematurely timing out any tests' transactions. To decrease the timeout, run:: - - db.adminCommand( { setParameter: 1, transactionLifetimeLimitSeconds: 3 } ) - -Note that mongo-orchestration >=0.6.13 automatically sets this timeout to 3 -seconds so drivers using mongo-orchestration do not need to run these commands -manually. - -.. _SERVER-39726: https://jira.mongodb.org/browse/SERVER-39726 - -.. _SERVER-39349: https://jira.mongodb.org/browse/SERVER-39349 - -Why do tests that run distinct sometimes fail with StaleDbVersion? -`````````````````````````````````````````````````````````````````` - -When a shard receives its first command that contains a dbVersion, the shard -returns a StaleDbVersion error and the Mongos retries the operation. In a -sharded transaction, Mongos does not retry these operations and instead returns -the error to the client. For example:: - - Command distinct failed: Transaction aa09e296-472a-494f-8334-48d57ab530b6:1 was aborted on statement 0 due to: an error from cluster data placement change :: caused by :: got stale databaseVersion response from shard sh01 at host localhost:27217 :: caused by :: don't know dbVersion. - -To workaround this limitation, a driver test runner MUST run a -non-transactional ``distinct`` command on each Mongos before running any test -that uses ``distinct``. To ease the implementation drivers can simply run -``distinct`` before *every* test. - -Note that drivers can remove this workaround once `SERVER-39704`_ is resolved -so that mongos retries this operation transparently. The ``distinct`` command -is the only command allowed in a sharded transaction that uses the -``dbVersion`` concept so it is the only command affected. - -.. _SERVER-39704: https://jira.mongodb.org/browse/SERVER-39704 - -Changelog -========= - -:2019-05-15: Add operation level ``error`` field to assert any error. -:2019-03-25: Add workaround for StaleDbVersion on distinct. -:2019-03-01: Add top-level ``runOn`` field to denote server version and/or - topology requirements requirements for the test file. Removes the - ``topology`` top-level field, which is now expressed within - ``runOn`` elements. -:2019-02-28: ``useMultipleMongoses: true`` and non-targeted fail points are - mutually exclusive. -:2019-02-13: Modify test format for 4.2 sharded transactions, including - "useMultipleMongoses", ``object: testRunner``, the - ``targetedFailPoint`` operation, and recoveryToken assertions. diff --git a/testdata/uri-options/README.rst b/testdata/uri-options/README.rst deleted file mode 100644 index f6a128bba9..0000000000 --- a/testdata/uri-options/README.rst +++ /dev/null @@ -1,54 +0,0 @@ -======================= -URI Options Tests -======================= - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the URI Options spec. 
- -These tests use the same format as the Connection String spec tests. - -Version -------- - -Files in the "specifications" repository have no version scheme. They are not -tied to a MongoDB server version. - -Format ------- - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``uri``: A string containing the URI to be parsed. -- ``valid``: A boolean indicating if the URI should be considered valid. -- ``warning``: A boolean indicating whether URI parsing should emit a warning. -- ``hosts``: Included for compatibility with the Connection String spec tests. This will always be ``~``. -- ``auth``: Included for compatibility with the Connection String spec tests. This will always be ``~``. -- ``options``: An object containing key/value pairs for each parsed query string - option. - -If a test case includes a null value for one of these keys (e.g. ``auth: ~``, -``hosts: ~``), no assertion is necessary. This both simplifies parsing of the -test files (keys should always exist) and allows flexibility for drivers that -might substitute default values *during* parsing (e.g. omitted ``hosts`` could be -parsed as ``["localhost"]``). - -The ``valid`` and ``warning`` fields are boolean in order to keep the tests -flexible. We are not concerned with asserting the format of specific error or -warnings messages strings. - -Under normal circumstances, it should not be necessary to specify both -``valid: false`` and ``warning: true``. Typically, a URI test case will either -yield an error (e.g. options conflict) or a warning (e.g. invalid type or value -for an option), but not both. - -Use as unit tests -================= - -Testing whether a URI is valid or not requires testing whether URI parsing (or -MongoClient construction) causes a warning due to a URI option being invalid and asserting that the -options parsed from the URI match those listed in the ``options`` field. - -Note that there are tests for each of the options marked as optional; drivers will need to implement -logic to skip over the optional tests that they don’t implement. diff --git a/testdata/versioned-api/README.rst b/testdata/versioned-api/README.rst deleted file mode 100644 index a0b0599f64..0000000000 --- a/testdata/versioned-api/README.rst +++ /dev/null @@ -1,37 +0,0 @@ -=================== -Versioned API Tests -=================== - -.. contents:: - ----- - -Notes -===== - -This directory contains tests for the Versioned API specification. They are -implemented in the `Unified Test Format <../../unified-test-format/unified-test-format.rst>`__, -and require schema version 1.1. Note that to run these tests, the server must be -started with both ``enableTestCommands`` and ``acceptApiVersion2`` parameters -set to true. - -Testing with required API version -================================= - -Drivers MUST run their test suite against a cluster with the -``requireApiVersion`` parameter enabled and also requires authentication. - -To run this test, proceed as follows: -- Start a standalone mongod instance - -- Connect to the standalone instance and run the following command on the - ``admin`` database: ``{ setParameter: 1, requireApiVersion: true }`` - -- Declare an API version for the test run through the ``MONGODB_API_VERSION`` - environment variable. - -- If the environment variable is set, all clients created in tests MUST declare - the ``ServerApiVersion`` specified. 
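For illustration only (this is not part of the deleted README): a rough Go sketch of how a test suite might honor the ``MONGODB_API_VERSION`` environment variable described above. The ``options.ServerAPI`` helper, ``SetServerAPIOptions``, and the context-free ``mongo.Connect`` signature follow the v2 API used elsewhere in this change, but the exact wiring shown here is an assumption, not a prescribed implementation.

.. code:: go

    package apiversiontest

    import (
        "fmt"
        "os"

        "go.mongodb.org/mongo-driver/v2/mongo"
        "go.mongodb.org/mongo-driver/v2/mongo/options"
    )

    // newTestClient builds a client that declares the server API version
    // requested through MONGODB_API_VERSION, if any.
    func newTestClient(uri string) (*mongo.Client, error) {
        opts := options.Client().ApplyURI(uri)

        if v := os.Getenv("MONGODB_API_VERSION"); v != "" {
            // Only version "1" is defined today; reject anything else loudly.
            if v != string(options.ServerAPIVersion1) {
                return nil, fmt.Errorf("unsupported MONGODB_API_VERSION %q", v)
            }
            opts.SetServerAPIOptions(options.ServerAPI(options.ServerAPIVersion1))
        }

        return mongo.Connect(opts)
    }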
- -No other topologies must be tested until ``mongo-orchestration`` can handle -servers with ``requireApiVersion`` enabled. diff --git a/x/mongo/driver/auth/auth.go b/x/mongo/driver/auth/auth.go index f5e4ee87f9..843715dc17 100644 --- a/x/mongo/driver/auth/auth.go +++ b/x/mongo/driver/auth/auth.go @@ -65,6 +65,11 @@ type HandshakeOptions struct { ClusterClock *session.ClusterClock ServerAPI *driver.ServerAPIOptions LoadBalanced bool + + // Fields provided by a library that wraps the Go Driver. + OuterLibraryName string + OuterLibraryVersion string + OuterLibraryPlatform string } type authHandshaker struct { @@ -94,7 +99,10 @@ func (ah *authHandshaker) GetHandshakeInformation( SASLSupportedMechs(ah.options.DBUser). ClusterClock(ah.options.ClusterClock). ServerAPI(ah.options.ServerAPI). - LoadBalanced(ah.options.LoadBalanced) + LoadBalanced(ah.options.LoadBalanced). + OuterLibraryName(ah.options.OuterLibraryName). + OuterLibraryVersion(ah.options.OuterLibraryVersion). + OuterLibraryPlatform(ah.options.OuterLibraryPlatform) if ah.options.Authenticator != nil { if speculativeAuth, ok := ah.options.Authenticator.(SpeculativeAuthenticator); ok { diff --git a/x/mongo/driver/auth/auth_spec_test.go b/x/mongo/driver/auth/auth_spec_test.go index 911c583e39..1c0701014d 100644 --- a/x/mongo/driver/auth/auth_spec_test.go +++ b/x/mongo/driver/auth/auth_spec_test.go @@ -13,7 +13,6 @@ import ( "path" "testing" - "go.mongodb.org/mongo-driver/v2/internal/mongoutil" "go.mongodb.org/mongo-driver/v2/internal/require" "go.mongodb.org/mongo-driver/v2/internal/spectest" "go.mongodb.org/mongo-driver/v2/mongo/options" @@ -59,14 +58,12 @@ func runTestsInFile(t *testing.T, dirname string, filename string) { func runTest(t *testing.T, filename string, test testCase) { t.Run(filename+":"+test.Description, func(t *testing.T) { - clientOptsBldr := options.Client().ApplyURI(test.URI) - - opts, _ := mongoutil.NewOptions[options.ClientOptions](clientOptsBldr) + opts := options.Client().ApplyURI(test.URI) if test.Valid { - require.NoError(t, clientOptsBldr.Validate()) + require.NoError(t, opts.Validate()) } else { - require.Error(t, clientOptsBldr.Validate()) + require.Error(t, opts.Validate()) return } diff --git a/x/mongo/driver/drivertest/opmsg_deployment_test.go b/x/mongo/driver/drivertest/opmsg_deployment_test.go index 24e5294e99..3dc5e86265 100644 --- a/x/mongo/driver/drivertest/opmsg_deployment_test.go +++ b/x/mongo/driver/drivertest/opmsg_deployment_test.go @@ -21,11 +21,8 @@ func TestOPMSGMockDeployment(t *testing.T) { md := NewMockDeployment() opts := options.Client() - opts.Opts = append(opts.Opts, func(co *options.ClientOptions) error { - co.Deployment = md + opts.Deployment = md - return nil - }) client, err := mongo.Connect(opts) t.Run("NewMockDeployment connect to client", func(t *testing.T) { diff --git a/x/mongo/driver/errors.go b/x/mongo/driver/errors.go index 61847329f2..f4bd46deb5 100644 --- a/x/mongo/driver/errors.go +++ b/x/mongo/driver/errors.go @@ -25,7 +25,22 @@ import ( const LegacyNotPrimaryErrMsg = "not master" var ( - retryableCodes = []int32{11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001, 262} + retryableCodes = []int32{ + 6, // HostUnreachable + 7, // HostNotFound + 89, // NetworkTimeout + 91, // ShutdownInProgress + 134, // ReadConcernMajorityNotAvailableYet + 189, // PrimarySteppedDown + 262, // ExceededTimeLimit + 9001, // SocketException + 10107, // NotWritablePrimary + 11600, // InterruptedAtShutdown + 11602, // InterruptedDueToReplStateChange + 13435, // NotPrimaryNoSecondaryOk 
+ 13436, // NotPrimaryOrSecondary + } + nodeIsRecoveringCodes = []int32{11600, 11602, 13436, 189, 91} notPrimaryCodes = []int32{10107, 13435, 10058} nodeIsShuttingDownCodes = []int32{11600, 91} diff --git a/x/mongo/driver/operation/distinct.go b/x/mongo/driver/operation/distinct.go index 19e8edd80f..89d412def3 100644 --- a/x/mongo/driver/operation/distinct.go +++ b/x/mongo/driver/operation/distinct.go @@ -31,6 +31,7 @@ type Distinct struct { clock *session.ClusterClock collection string comment bsoncore.Value + hint bsoncore.Value monitor *event.CommandMonitor crypt driver.Crypt database string @@ -120,6 +121,9 @@ func (d *Distinct) command(dst []byte, desc description.SelectedServer) ([]byte, if d.comment.Type != bsoncore.Type(0) { dst = bsoncore.AppendValueElement(dst, "comment", d.comment) } + if d.hint.Type != bsoncore.Type(0) { + dst = bsoncore.AppendValueElement(dst, "hint", d.hint) + } if d.key != nil { dst = bsoncore.AppendStringElement(dst, "key", *d.key) } @@ -199,6 +203,16 @@ func (d *Distinct) Comment(comment bsoncore.Value) *Distinct { return d } +// Hint specifies the index to use for the operation, either the index name as +// a string or the index specification as a document. +func (d *Distinct) Hint(hint bsoncore.Value) *Distinct { + if d == nil { + d = new(Distinct) + } + + d.hint = hint + return d +} + // CommandMonitor sets the monitor to use for APM events. func (d *Distinct) CommandMonitor(monitor *event.CommandMonitor) *Distinct { if d == nil { diff --git a/x/mongo/driver/operation/hello.go b/x/mongo/driver/operation/hello.go index 16655f691d..dd232f0623 100644 --- a/x/mongo/driver/operation/hello.go +++ b/x/mongo/driver/operation/hello.go @@ -50,6 +50,11 @@ type Hello struct { loadBalanced bool omitMaxTimeMS bool + // Fields provided by a library that wraps the Go Driver. + outerLibraryName string + outerLibraryVersion string + outerLibraryPlatform string + res bsoncore.Document } @@ -123,6 +128,29 @@ func (h *Hello) LoadBalanced(lb bool) *Hello { return h } +// OuterLibraryName specifies the name of the library wrapping the Go Driver. +func (h *Hello) OuterLibraryName(name string) *Hello { + h.outerLibraryName = name + + return h +} + +// OuterLibraryVersion specifies the version of the library wrapping the Go +// Driver. +func (h *Hello) OuterLibraryVersion(version string) *Hello { + h.outerLibraryVersion = version + + return h +} + +// OuterLibraryPlatform specifies the platform of the library wrapping the Go +// Driver. +func (h *Hello) OuterLibraryPlatform(platform string) *Hello { + h.outerLibraryPlatform = platform + + return h +} + // Result returns the result of executing this operation. func (h *Hello) Result(addr address.Address) description.Server { return driverutil.NewServerDescription(addr, bson.Raw(h.res)) @@ -247,12 +275,22 @@ func appendClientAppName(dst []byte, name string) ([]byte, error) { // appendClientDriver appends the driver metadata to dst. It is the // responsibility of the caller to check that this appending does not cause dst // to exceed any size limitations. 
-func appendClientDriver(dst []byte) ([]byte, error) { +func appendClientDriver(dst []byte, outerLibraryName, outerLibraryVersion string) ([]byte, error) { var idx int32 idx, dst = bsoncore.AppendDocumentElementStart(dst, "driver") - dst = bsoncore.AppendStringElement(dst, "name", driverName) - dst = bsoncore.AppendStringElement(dst, "version", version.Driver) + name := driverName + if outerLibraryName != "" { + name = name + "|" + outerLibraryName + } + + version := version.Driver + if outerLibraryVersion != "" { + version = version + "|" + outerLibraryVersion + } + + dst = bsoncore.AppendStringElement(dst, "name", name) + dst = bsoncore.AppendStringElement(dst, "version", version) return bsoncore.AppendDocumentEnd(dst, idx) } @@ -374,8 +412,13 @@ func appendClientOS(dst []byte, omitNonType bool) ([]byte, error) { // appendClientPlatform appends the platform metadata to dst. It is the // responsibility of the caller to check that this appending does not cause dst // to exceed any size limitations. -func appendClientPlatform(dst []byte) []byte { - return bsoncore.AppendStringElement(dst, "platform", runtime.Version()) +func appendClientPlatform(dst []byte, outerLibraryPlatform string) []byte { + platform := runtime.Version() + if outerLibraryPlatform != "" { + platform = platform + "|" + outerLibraryPlatform + } + + return bsoncore.AppendStringElement(dst, "platform", platform) } // encodeClientMetadata encodes the client metadata into a BSON document. maxLen @@ -412,7 +455,7 @@ func appendClientPlatform(dst []byte) []byte { // } // } // } -func encodeClientMetadata(appname string, maxLen int) ([]byte, error) { +func encodeClientMetadata(h *Hello, maxLen int) ([]byte, error) { dst := make([]byte, 0, maxLen) omitEnvDoc := false @@ -426,12 +469,12 @@ retry: idx, dst = bsoncore.AppendDocumentStart(dst) var err error - dst, err = appendClientAppName(dst, appname) + dst, err = appendClientAppName(dst, h.appname) if err != nil { return nil, err } - dst, err = appendClientDriver(dst) + dst, err = appendClientDriver(dst, h.outerLibraryName, h.outerLibraryVersion) if err != nil { return nil, err } @@ -442,7 +485,7 @@ retry: } if !truncatePlatform { - dst = appendClientPlatform(dst) + dst = appendClientPlatform(dst, h.outerLibraryPlatform) } if !omitEnvDocument { @@ -519,7 +562,7 @@ func (h *Hello) handshakeCommand(dst []byte, desc description.SelectedServer) ([ } dst, _ = bsoncore.AppendArrayEnd(dst, idx) - clientMetadata, _ := encodeClientMetadata(h.appname, maxClientMetadataSize) + clientMetadata, _ := encodeClientMetadata(h, maxClientMetadataSize) // If the client metadata is empty, do not append it to the command. 
if len(clientMetadata) > 0 { diff --git a/x/mongo/driver/operation/hello_test.go b/x/mongo/driver/operation/hello_test.go index 23db7d1bac..187c23aae8 100644 --- a/x/mongo/driver/operation/hello_test.go +++ b/x/mongo/driver/operation/hello_test.go @@ -112,12 +112,22 @@ func TestAppendClientDriver(t *testing.T) { t.Parallel() tests := []struct { - name string - want []byte // Extend JSON + name string + outerLibraryName string + outerLibraryVersion string + want []byte // Extend JSON }{ { - name: "full", - want: []byte(fmt.Sprintf(`{"driver":{"name": %q, "version": %q}}`, driverName, version.Driver)), + name: "full", + outerLibraryName: "", + outerLibraryVersion: "", + want: []byte(fmt.Sprintf(`{"driver":{"name": %q, "version": %q}}`, driverName, version.Driver)), + }, + { + name: "with outer library data", + outerLibraryName: "outer-library-name", + outerLibraryVersion: "outer-library-version", + want: []byte(fmt.Sprintf(`{"driver":{"name": "%s|outer-library-name", "version": "%s|outer-library-version"}}`, driverName, version.Driver)), }, } @@ -129,7 +139,7 @@ func TestAppendClientDriver(t *testing.T) { cb := func(_ int, dst []byte) ([]byte, error) { var err error - dst, err = appendClientDriver(dst) + dst, err = appendClientDriver(dst, test.outerLibraryName, test.outerLibraryVersion) return dst, err } @@ -351,12 +361,19 @@ func TestAppendClientPlatform(t *testing.T) { t.Parallel() tests := []struct { - name string - want []byte // Extended JSON + name string + outerLibraryPlatform string + want []byte // Extended JSON }{ { - name: "full", - want: []byte(fmt.Sprintf(`{"platform":%q}`, runtime.Version())), + name: "full", + outerLibraryPlatform: "", + want: []byte(fmt.Sprintf(`{"platform":%q}`, runtime.Version())), + }, + { + name: "with outer library data", + outerLibraryPlatform: "outer-library-platform", + want: []byte(fmt.Sprintf(`{"platform":"%s|outer-library-platform"}`, runtime.Version())), }, } @@ -368,7 +385,7 @@ func TestAppendClientPlatform(t *testing.T) { cb := func(_ int, dst []byte) ([]byte, error) { var err error - dst = appendClientPlatform(dst) + dst = appendClientPlatform(dst, test.outerLibraryPlatform) return dst, err } @@ -435,7 +452,7 @@ func TestEncodeClientMetadata(t *testing.T) { t.Setenv("KUBERNETES_SERVICE_HOST", "0.0.0.0") t.Run("nothing is omitted", func(t *testing.T) { - got, err := encodeClientMetadata("foo", maxClientMetadataSize) + got, err := encodeClientMetadata(NewHello().AppName("foo"), maxClientMetadataSize) assert.Nil(t, err, "error in encodeClientMetadata: %v", err) want := formatJSON(&clientMetadata{ @@ -458,10 +475,10 @@ func TestEncodeClientMetadata(t *testing.T) { t.Run("env is omitted sub env.name", func(t *testing.T) { // Calculate the full length of a bsoncore.Document. - temp, err := encodeClientMetadata("foo", maxClientMetadataSize) + temp, err := encodeClientMetadata(NewHello().AppName("foo"), maxClientMetadataSize) require.NoError(t, err, "error constructing template: %v", err) - got, err := encodeClientMetadata("foo", len(temp)-1) + got, err := encodeClientMetadata(NewHello().AppName("foo"), len(temp)-1) assert.Nil(t, err, "error in encodeClientMetadata: %v", err) want := formatJSON(&clientMetadata{ @@ -482,7 +499,7 @@ func TestEncodeClientMetadata(t *testing.T) { t.Run("os is omitted sub os.type", func(t *testing.T) { // Calculate the full length of a bsoncore.Document. 
- temp, err := encodeClientMetadata("foo", maxClientMetadataSize) + temp, err := encodeClientMetadata(NewHello().AppName("foo"), maxClientMetadataSize) require.NoError(t, err, "error constructing template: %v", err) // Calculate what the environment costs. @@ -499,7 +516,7 @@ func TestEncodeClientMetadata(t *testing.T) { // Environment sub name. envSubName := len(edst) - len(ndst) - got, err := encodeClientMetadata("foo", len(temp)-envSubName-1) + got, err := encodeClientMetadata(NewHello().AppName("foo"), len(temp)-envSubName-1) assert.Nil(t, err, "error in encodeClientMetadata: %v", err) want := formatJSON(&clientMetadata{ @@ -520,7 +537,7 @@ func TestEncodeClientMetadata(t *testing.T) { t.Run("omit the env doc entirely", func(t *testing.T) { // Calculate the full length of a bsoncore.Document. - temp, err := encodeClientMetadata("foo", maxClientMetadataSize) + temp, err := encodeClientMetadata(NewHello().AppName("foo"), maxClientMetadataSize) require.NoError(t, err, "error constructing template: %v", err) // Calculate what the environment costs. @@ -533,7 +550,7 @@ func TestEncodeClientMetadata(t *testing.T) { // Calculate what the environment plus the os.type costs. envAndOSType := len(edst) + len(odst) - got, err := encodeClientMetadata("foo", len(temp)-envAndOSType-1) + got, err := encodeClientMetadata(NewHello().AppName("foo"), len(temp)-envAndOSType-1) assert.Nil(t, err, "error in encodeClientMetadata: %v", err) want := formatJSON(&clientMetadata{ @@ -548,7 +565,7 @@ func TestEncodeClientMetadata(t *testing.T) { t.Run("omit the platform", func(t *testing.T) { // Calculate the full length of a bsoncore.Document. - temp, err := encodeClientMetadata("foo", maxClientMetadataSize) + temp, err := encodeClientMetadata(NewHello().AppName("foo"), maxClientMetadataSize) require.NoError(t, err, "error constructing template: %v", err) // Calculate what the environment costs. @@ -559,12 +576,12 @@ func TestEncodeClientMetadata(t *testing.T) { odst := bsoncore.AppendStringElement(nil, "type", runtime.GOOS) // Calculate what the platform costs - pdst := appendClientPlatform(nil) + pdst := appendClientPlatform(nil, "") // Calculate what the environment plus the os.type costs. 
envAndOSTypeAndPlatform := len(edst) + len(odst) + len(pdst) - got, err := encodeClientMetadata("foo", len(temp)-envAndOSTypeAndPlatform) + got, err := encodeClientMetadata(NewHello().AppName("foo"), len(temp)-envAndOSTypeAndPlatform) assert.Nil(t, err, "error in encodeClientMetadata: %v", err) want := formatJSON(&clientMetadata{ @@ -577,7 +594,7 @@ func TestEncodeClientMetadata(t *testing.T) { }) t.Run("0 max len", func(t *testing.T) { - got, err := encodeClientMetadata("foo", 0) + got, err := encodeClientMetadata(NewHello().AppName("foo"), 0) assert.Nil(t, err, "error in encodeClientMetadata: %v", err) assert.Len(t, got, 0) }) @@ -657,7 +674,7 @@ func BenchmarkClientMetadata(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { - _, err := encodeClientMetadata("foo", maxClientMetadataSize) + _, err := encodeClientMetadata(NewHello().AppName("foo"), maxClientMetadataSize) if err != nil { b.Fatal(err) } @@ -680,7 +697,7 @@ func BenchmarkClientMetadtaLargeEnv(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { - _, err := encodeClientMetadata("foo", maxClientMetadataSize) + _, err := encodeClientMetadata(NewHello().AppName("foo"), maxClientMetadataSize) if err != nil { b.Fatal(err) } @@ -694,7 +711,7 @@ func FuzzEncodeClientMetadata(f *testing.F) { return } - _, err := encodeClientMetadata(appname, maxClientMetadataSize) + _, err := encodeClientMetadata(NewHello().AppName(appname), maxClientMetadataSize) if err != nil { t.Fatalf("error appending client: %v", err) } @@ -704,7 +721,7 @@ func FuzzEncodeClientMetadata(f *testing.F) { t.Fatalf("error appending client app name: %v", err) } - _, err = appendClientDriver(b) + _, err = appendClientDriver(b, "", "") if err != nil { t.Fatalf("error appending client driver: %v", err) } @@ -739,6 +756,6 @@ func FuzzEncodeClientMetadata(f *testing.F) { t.Fatalf("error appending client os t: %v", err) } - appendClientPlatform(b) + appendClientPlatform(b, "") }) } diff --git a/x/mongo/driver/topology/server.go b/x/mongo/driver/topology/server.go index 55a2ddd8c6..7fab1136f8 100644 --- a/x/mongo/driver/topology/server.go +++ b/x/mongo/driver/topology/server.go @@ -808,7 +808,8 @@ func (s *Server) createConnection() *connection { opts = append(opts, WithHandshaker(func(Handshaker) Handshaker { return operation.NewHello().AppName(s.cfg.appname).Compressors(s.cfg.compressionOpts). - ServerAPI(s.cfg.serverAPI) + ServerAPI(s.cfg.serverAPI).OuterLibraryName(s.cfg.outerLibraryName). + OuterLibraryVersion(s.cfg.outerLibraryVersion).OuterLibraryPlatform(s.cfg.outerLibraryPlatform) }), // Override any monitors specified in options with nil to avoid monitoring heartbeats. WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor { return nil }), diff --git a/x/mongo/driver/topology/server_options.go b/x/mongo/driver/topology/server_options.go index 8d86d5bd85..490834cbef 100644 --- a/x/mongo/driver/topology/server_options.go +++ b/x/mongo/driver/topology/server_options.go @@ -41,6 +41,11 @@ type serverConfig struct { logger *logger.Logger poolMaxIdleTime time.Duration poolMaintainInterval time.Duration + + // Fields provided by a library that wraps the Go Driver. 
+ outerLibraryName string + outerLibraryVersion string + outerLibraryPlatform string } func newServerConfig(connectTimeout time.Duration, opts ...ServerOption) *serverConfig { @@ -96,6 +101,30 @@ func WithServerAppName(fn func(string) string) ServerOption { } } +// WithOuterLibraryName configures the name for the outer library to include +// in the drivers section of the handshake metadata. +func WithOuterLibraryName(fn func(string) string) ServerOption { + return func(cfg *serverConfig) { + cfg.outerLibraryName = fn(cfg.outerLibraryName) + } +} + +// WithOuterLibraryVersion configures the version for the outer library to +// include in the drivers section of the handshake metadata. +func WithOuterLibraryVersion(fn func(string) string) ServerOption { + return func(cfg *serverConfig) { + cfg.outerLibraryVersion = fn(cfg.outerLibraryVersion) + } +} + +// WithOuterLibraryPlatform configures the platform for the outer library to +// include in the platform section of the handshake metadata. +func WithOuterLibraryPlatform(fn func(string) string) ServerOption { + return func(cfg *serverConfig) { + cfg.outerLibraryPlatform = fn(cfg.outerLibraryPlatform) + } +} + // WithHeartbeatInterval configures a server's heartbeat interval. func WithHeartbeatInterval(fn func(time.Duration) time.Duration) ServerOption { return func(cfg *serverConfig) { diff --git a/x/mongo/driver/topology/topology_options.go b/x/mongo/driver/topology/topology_options.go index d98b47d5ef..aefa74c56a 100644 --- a/x/mongo/driver/topology/topology_options.go +++ b/x/mongo/driver/topology/topology_options.go @@ -15,7 +15,6 @@ import ( "go.mongodb.org/mongo-driver/v2/event" "go.mongodb.org/mongo-driver/v2/internal/logger" - "go.mongodb.org/mongo-driver/v2/internal/mongoutil" "go.mongodb.org/mongo-driver/v2/mongo/options" "go.mongodb.org/mongo-driver/v2/x/mongo/driver" "go.mongodb.org/mongo-driver/v2/x/mongo/driver/auth" @@ -44,36 +43,30 @@ type Config struct { logger *logger.Logger } -// ConvertToDriverAPIOptions converts a options.ServerAPIOptions instance to a driver.ServerAPIOptions. -func ConvertToDriverAPIOptions(opts options.Lister[options.ServerAPIOptions]) *driver.ServerAPIOptions { - args, _ := mongoutil.NewOptions[options.ServerAPIOptions](opts) - - driverOpts := driver.NewServerAPIOptions(string(args.ServerAPIVersion)) - if args.Strict != nil { - driverOpts.SetStrict(*args.Strict) +// ConvertToDriverAPIOptions converts a given ServerAPIOptions object from the +// options package to a ServerAPIOptions object from the driver package. 
+func ConvertToDriverAPIOptions(opts *options.ServerAPIOptions) *driver.ServerAPIOptions { + driverOpts := driver.NewServerAPIOptions(string(opts.ServerAPIVersion)) + if opts.Strict != nil { + driverOpts.SetStrict(*opts.Strict) } - if args.DeprecationErrors != nil { - driverOpts.SetDeprecationErrors(*args.DeprecationErrors) + if opts.DeprecationErrors != nil { + driverOpts.SetDeprecationErrors(*opts.DeprecationErrors) } return driverOpts } -func newLogger(opts options.Lister[options.LoggerOptions]) (*logger.Logger, error) { +func newLogger(opts *options.LoggerOptions) (*logger.Logger, error) { if opts == nil { opts = options.Logger() } - args, err := mongoutil.NewOptions[options.LoggerOptions](opts) - if err != nil { - return nil, fmt.Errorf("failed to construct options from builder: %w", err) - } - componentLevels := make(map[logger.Component]logger.Level) - for component, level := range args.ComponentLevels { + for component, level := range opts.ComponentLevels { componentLevels[logger.Component(component)] = logger.Level(level) } - log, err := logger.New(args.Sink, args.MaxDocumentLength, componentLevels) + log, err := logger.New(opts.Sink, opts.MaxDocumentLength, componentLevels) if err != nil { return nil, fmt.Errorf("error creating logger: %w", err) } @@ -128,21 +121,10 @@ func ConvertCreds(cred *options.Credential) *driver.Cred { } } -// NewConfig behaves like NewConfigFromOptions by extracting arguments from a -// list of ClientOptions setters. -func NewConfig(opts *options.ClientOptionsBuilder, clock *session.ClusterClock) (*Config, error) { - args, err := mongoutil.NewOptions[options.ClientOptions](opts) - if err != nil { - return nil, fmt.Errorf("failed to construct options from builder: %w", err) - } - - return NewConfigFromOptions(args, clock) -} - -// NewConfigFromOptions will translate data from client options into a topology -// config for building non-default deployments. Server and topology options are -// not honored if a custom deployment is used. -func NewConfigFromOptions(opts *options.ClientOptions, clock *session.ClusterClock) (*Config, error) { +// NewConfig will translate data from client options into a topology config for +// building non-default deployments. Server and topology options are not honored +// if a custom deployment is used. +func NewConfig(opts *options.ClientOptions, clock *session.ClusterClock) (*Config, error) { var authenticator driver.Authenticator var err error if opts.Auth != nil { @@ -163,20 +145,9 @@ func NewConfigFromOptions(opts *options.ClientOptions, clock *session.ClusterClo // options are not honored if a custom deployment is used. It uses a passed in // authenticator to authenticate the connection. 
func NewConfigFromOptionsWithAuthenticator(opts *options.ClientOptions, clock *session.ClusterClock, authenticator driver.Authenticator) (*Config, error) { - var serverAPI *driver.ServerAPIOptions - clientOptsBldr := options.ClientOptionsBuilder{ - Opts: []func(*options.ClientOptions) error{ - func(copts *options.ClientOptions) error { - *copts = *opts - - return nil - }, - }, - } - - if err := clientOptsBldr.Validate(); err != nil { + if err := opts.Validate(); err != nil { return nil, err } @@ -226,6 +197,26 @@ func NewConfigFromOptionsWithAuthenticator(opts *options.ClientOptions, clock *s return appName })) } + + var outerLibraryName, outerLibraryVersion, outerLibraryPlatform string + if opts.DriverInfo != nil { + outerLibraryName = opts.DriverInfo.Name + outerLibraryVersion = opts.DriverInfo.Version + outerLibraryPlatform = opts.DriverInfo.Platform + + serverOpts = append(serverOpts, WithOuterLibraryName(func(string) string { + return outerLibraryName + })) + + serverOpts = append(serverOpts, WithOuterLibraryVersion(func(string) string { + return outerLibraryVersion + })) + + serverOpts = append(serverOpts, WithOuterLibraryPlatform(func(string) string { + return outerLibraryPlatform + })) + } + // Compressors & ZlibLevel var comps []string if len(opts.Compressors) > 0 { @@ -264,12 +255,15 @@ func NewConfigFromOptionsWithAuthenticator(opts *options.ClientOptions, clock *s var handshaker func(driver.Handshaker) driver.Handshaker if authenticator != nil { handshakeOpts := &auth.HandshakeOptions{ - AppName: appName, - Authenticator: authenticator, - Compressors: comps, - ServerAPI: serverAPI, - LoadBalanced: loadBalanced, - ClusterClock: clock, + AppName: appName, + Authenticator: authenticator, + Compressors: comps, + ServerAPI: serverAPI, + LoadBalanced: loadBalanced, + ClusterClock: clock, + OuterLibraryName: outerLibraryName, + OuterLibraryVersion: outerLibraryVersion, + OuterLibraryPlatform: outerLibraryPlatform, } if opts.Auth.AuthMechanism == "" { @@ -288,7 +282,10 @@ func NewConfigFromOptionsWithAuthenticator(opts *options.ClientOptions, clock *s Compressors(comps). ClusterClock(clock). ServerAPI(serverAPI). - LoadBalanced(loadBalanced) + LoadBalanced(loadBalanced). + OuterLibraryName(outerLibraryName). + OuterLibraryVersion(outerLibraryVersion). + OuterLibraryPlatform(outerLibraryPlatform) } } diff --git a/x/mongo/driver/topology/topology_test.go b/x/mongo/driver/topology/topology_test.go index 5aa856c443..b4af40b920 100644 --- a/x/mongo/driver/topology/topology_test.go +++ b/x/mongo/driver/topology/topology_test.go @@ -552,7 +552,7 @@ func TestTopologyConstructionLogging(t *testing.T) { documentDBMsg = `You appear to be connected to a DocumentDB cluster. For more information regarding feature compatibility and support please visit https://www.mongodb.com/supportability/documentdb` ) - newLoggerOptionsBldr := func(sink options.LogSink) *options.LoggerOptionsBuilder { + newLoggerOptionsBldr := func(sink options.LogSink) *options.LoggerOptions { return options. Logger(). SetSink(sink).
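As a usage illustration of the handshake metadata plumbing added above: a minimal Go sketch of how a wrapping library might populate ``DriverInfo`` on the client options so that the handshake reports its name, version, and platform appended to the Go Driver's values after a ``|`` separator. The ``options.DriverInfo`` type name and the direct field assignment are assumptions inferred from the fields read in ``NewConfigFromOptionsWithAuthenticator``, not a confirmed public API.

.. code:: go

    package outerlibrary

    import (
        "go.mongodb.org/mongo-driver/v2/mongo"
        "go.mongodb.org/mongo-driver/v2/mongo/options"
    )

    // connect builds a client whose handshake metadata advertises the wrapping
    // library in addition to the Go Driver itself. Each non-empty DriverInfo
    // field is appended to the corresponding handshake value after a "|".
    func connect(uri string) (*mongo.Client, error) {
        opts := options.Client().ApplyURI(uri)

        // Hypothetical wrapping-library metadata; the DriverInfo struct name
        // is assumed from the ClientOptions field used by this change.
        opts.DriverInfo = &options.DriverInfo{
            Name:     "outer-library-name",
            Version:  "outer-library-version",
            Platform: "outer-library-platform",
        }

        return mongo.Connect(opts)
    }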