diff --git a/Cargo.lock b/Cargo.lock index 78a19264e9c..30273b8d3f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -78,7 +78,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -89,7 +89,7 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "getrandom 0.2.15", "once_cell", "version_check", @@ -293,8 +293,8 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "itoa", "matchit", @@ -319,8 +319,8 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", @@ -338,7 +338,7 @@ checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide 0.7.4", "object", @@ -573,16 +573,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "bstr" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" -dependencies = [ - "memchr", - "serde", -] - [[package]] name = "bumpalo" version = "3.16.0" @@ -697,12 +687,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -721,7 +705,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -975,7 +959,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1070,7 +1054,7 @@ version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest", @@ -1538,7 +1522,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -1549,7 +1533,7 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -1581,19 +1565,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "globset" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19" 
-dependencies = [ - "aho-corasick", - "bstr", - "log", - "regex-automata 0.4.8", - "regex-syntax 0.8.5", -] - [[package]] name = "group" version = "0.13.0" @@ -1617,11 +1588,11 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http 1.1.0", + "http", "indexmap 2.7.0", "slab", "tokio", - "tokio-util 0.7.13", + "tokio-util", "tracing", ] @@ -1631,7 +1602,7 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crunchy", ] @@ -1788,7 +1759,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "windows", ] @@ -1801,18 +1772,7 @@ checksum = "f34059280f617a59ee59a0455e93460d67e5c76dec42dd262d38f0f390f437b2" dependencies = [ "flume", "indicatif", - "parking_lot 0.12.3", -] - -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", + "parking_lot", ] [[package]] @@ -1826,17 +1786,6 @@ dependencies = [ "itoa", ] -[[package]] -name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - [[package]] name = "http-body" version = "1.0.1" @@ -1844,7 +1793,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http", ] [[package]] @@ -1855,8 +1804,8 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "pin-project-lite", ] @@ -1894,29 +1843,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hyper" -version = "0.14.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "1.5.1" @@ -1927,8 +1853,8 @@ dependencies = [ "futures-channel", "futures-util", "h2", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "httparse", "httpdate", "itoa", @@ -1945,8 +1871,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", - "http 1.1.0", - "hyper 1.5.1", + "http", + "hyper", "hyper-util", "rustls", "rustls-pki-types", @@ -1962,7 +1888,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.5.1", + "hyper", "hyper-util", "pin-project-lite", "tokio", @@ -1978,9 +1904,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "hyper 1.5.1", + "http", + 
"http-body", + "hyper", "pin-project-lite", "socket2", "tokio", @@ -2138,15 +2064,6 @@ dependencies = [ "similar", ] -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name = "ipnet" version = "2.10.1" @@ -2248,49 +2165,90 @@ dependencies = [ ] [[package]] -name = "jsonrpc-derive" -version = "18.0.0" +name = "jsonrpsee" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" +checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" dependencies = [ - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn 1.0.109", + "jsonrpsee-core", + "jsonrpsee-server", + "jsonrpsee-types", + "tokio", ] [[package]] -name = "jsonrpc-http-server" -version = "18.0.0" +name = "jsonrpsee-core" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" +checksum = "f2882f6f8acb9fdaec7cefc4fd607119a9bd709831df7d7672a1d3b644628280" dependencies = [ - "futures", - "hyper 0.14.31", - "jsonrpc-core", - "jsonrpc-server-utils", - "log", - "net2", - "parking_lot 0.11.2", - "unicase", + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "jsonrpsee-types", + "parking_lot", + "rand 0.8.5", + "rustc-hash 2.0.0", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tracing", ] [[package]] -name = "jsonrpc-server-utils" -version = "18.0.0" +name = "jsonrpsee-proc-macros" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" +checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" dependencies = [ - "bytes", - "futures", - "globset", - "jsonrpc-core", - "lazy_static", - "log", + "heck 0.5.0", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "jsonrpsee-server" +version = "0.24.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" +dependencies = [ + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "jsonrpsee-core", + "jsonrpsee-types", + "pin-project", + "route-recognizer", + "serde", + "serde_json", + "soketto", + "thiserror 1.0.69", "tokio", "tokio-stream", - "tokio-util 0.6.10", - "unicase", + "tokio-util", + "tower 0.4.13", + "tracing", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.24.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" +dependencies = [ + "http", + "serde", + "serde_json", + "thiserror 1.0.69", ] [[package]] @@ -2355,7 +2313,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-targets 0.52.6", ] @@ -2477,7 +2435,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "rayon", ] @@ -2514,7 +2472,7 
@@ checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", "http-body-util", - "hyper 1.5.1", + "hyper", "hyper-util", "indexmap 2.7.0", "ipnet", @@ -2603,17 +2561,6 @@ dependencies = [ "getrandom 0.2.15", ] -[[package]] -name = "net2" -version = "0.2.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi", -] - [[package]] name = "nix" version = "0.29.0" @@ -2621,7 +2568,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "cfg_aliases", "libc", ] @@ -2854,23 +2801,12 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 3.2.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.109", ] -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - [[package]] name = "parking_lot" version = "0.12.3" @@ -2878,21 +2814,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] @@ -2901,9 +2823,9 @@ version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall", "smallvec", "windows-targets 0.52.6", ] @@ -3113,15 +3035,6 @@ dependencies = [ "uint 0.9.5", ] -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml 0.5.11", -] - [[package]] name = "proc-macro-crate" version = "3.2.0" @@ -3505,15 +3418,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.5.7" @@ -3590,10 +3494,10 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.5.1", + "hyper", "hyper-rustls", "hyper-util", "ipnet", @@ -3613,7 +3517,7 @@ dependencies = [ "sync_wrapper 1.0.1", "tokio", "tokio-rustls", - "tokio-util 0.7.13", + "tokio-util", "tower-service", "url", "wasm-bindgen", @@ -3639,7 +3543,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", - "cfg-if 1.0.0", + "cfg-if", "getrandom 0.2.15", "libc", "spin", @@ -3686,6 +3590,12 @@ dependencies = [ "serde", ] +[[package]] +name = "route-recognizer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -4076,13 +3986,24 @@ dependencies = [ "version_check", ] +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sha2" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest", ] @@ -4169,6 +4090,22 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "soketto" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures", + "http", + "httparse", + "log", + "rand 0.8.5", + "sha1", +] + [[package]] name = "spandoc" version = "0.2.2" @@ -4325,7 +4262,7 @@ version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "once_cell", "rustix", @@ -4397,7 +4334,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe075d7053dae61ac5413a34ea7d4913b6e6207844fd726bdd858b37ff72bf5" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "libc", "log", "rustversion", @@ -4410,7 +4347,7 @@ version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -4482,7 +4419,7 @@ dependencies = [ "bytes", "libc", "mio", - "parking_lot 0.12.3", + "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", @@ -4522,7 +4459,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util 0.7.13", + "tokio-util", ] [[package]] @@ -4538,20 +4475,6 @@ dependencies = [ "tokio-stream", ] -[[package]] -name = "tokio-util" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-util" version = "0.7.13" @@ -4560,6 +4483,7 @@ checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "tokio", @@ -4620,10 +4544,10 @@ dependencies = [ "base64 0.22.1", "bytes", "h2", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.5.1", + "hyper", "hyper-timeout", "hyper-util", "percent-encoding", @@ -4680,7 +4604,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.13", + "tokio-util", "tower-layer", "tower-service", "tracing", 
@@ -4714,7 +4638,7 @@ dependencies = [ "tinyvec", "tokio", "tokio-test", - "tokio-util 0.7.13", + "tokio-util", "tower 0.4.13", "tower-fallback", "tower-test", @@ -4966,12 +4890,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "unicase" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" - [[package]] name = "unicode-bidi" version = "0.3.17" @@ -5101,7 +5019,7 @@ checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" dependencies = [ "anyhow", "cargo_metadata", - "cfg-if 1.0.0", + "cfg-if", "git2", "regex", "rustc_version", @@ -5228,7 +5146,7 @@ version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", "wasm-bindgen-macro", ] @@ -5254,7 +5172,7 @@ version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -5962,7 +5880,7 @@ dependencies = [ "thiserror 2.0.6", "tokio", "tokio-stream", - "tokio-util 0.7.13", + "tokio-util", "toml 0.8.19", "tower 0.4.13", "tracing", @@ -5994,11 +5912,13 @@ dependencies = [ "color-eyre", "futures", "hex", + "http-body-util", + "hyper", "indexmap 2.7.0", "insta", - "jsonrpc-core", - "jsonrpc-derive", - "jsonrpc-http-server", + "jsonrpsee", + "jsonrpsee-proc-macros", + "jsonrpsee-types", "nix", "proptest", "prost", @@ -6204,13 +6124,13 @@ dependencies = [ "howudoin", "http-body-util", "humantime-serde", - "hyper 1.5.1", + "hyper", "hyper-util", "indexmap 2.7.0", "indicatif", "inferno", "insta", - "jsonrpc-core", + "jsonrpsee-types", "lazy_static", "log", "metrics", diff --git a/deny.toml b/deny.toml index 7f804946767..3ae46206943 100644 --- a/deny.toml +++ b/deny.toml @@ -78,19 +78,8 @@ skip-tree = [ { name = "base64", version = "=0.21.7" }, { name = "sync_wrapper", version = "0.1.2" }, - # wait for jsonrpc-http-server to update hyper or for Zebra to replace jsonrpc (#8682) - { name = "h2", version = "=0.3.26" }, - { name = "http", version = "=0.2.12" }, - { name = "http-body", version = "=0.4.6" }, - { name = "hyper", version = "=0.14.31" }, - { name = "hyper-rustls", version = "=0.24.2" }, - - { name = "reqwest", version = "=0.11.27" }, - { name = "rustls", version = "=0.21.12" }, - { name = "rustls-pemfile", version = "=1.0.4" }, - { name = "rustls-webpki", version = "=0.101.7" }, - { name = "tokio-rustls", version = "=0.24.1" }, - { name = "webpki-roots", version = "=0.25.4" }, + # wait for abscissa_core to update toml + { name = "toml", version = "=0.5.11" }, # wait for structopt-derive to update heck { name = "heck", version = "=0.3.3" }, diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 56b7f3c60f0..d180f049dc5 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -59,9 +59,11 @@ chrono = { version = "0.4.39", default-features = false, features = [ ] } futures = "0.3.31" -jsonrpc-core = "18.0.0" -jsonrpc-derive = "18.0.0" -jsonrpc-http-server = "18.0.0" +jsonrpsee = { version = "0.24.7", features = ["server"] } +jsonrpsee-types = "0.24.7" +jsonrpsee-proc-macros = "0.24.7" +hyper = "1.5.0" +http-body-util = "0.1.2" # 
zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core serde_json = { version = "1.0.133", features = ["preserve_order"] } diff --git a/zebra-rpc/src/config.rs b/zebra-rpc/src/config.rs index 57187163e55..2a91d14334b 100644 --- a/zebra-rpc/src/config.rs +++ b/zebra-rpc/src/config.rs @@ -50,24 +50,12 @@ pub struct Config { /// The number of threads used to process RPC requests and responses. /// - /// Zebra's RPC server has a separate thread pool and a `tokio` executor for each thread. - /// State queries are run concurrently using the shared thread pool controlled by - /// the [`SyncSection.parallel_cpu_threads`](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html#structfield.parallel_cpu_threads) config. - /// - /// If the number of threads is not configured or zero, Zebra uses the number of logical cores. - /// If the number of logical cores can't be detected, Zebra uses one thread. - /// - /// Set to `1` to run all RPC queries on a single thread, and detect RPC port conflicts from - /// multiple Zebra or `zcashd` instances. - /// - /// For details, see [the `jsonrpc_http_server` documentation](https://docs.rs/jsonrpc-http-server/latest/jsonrpc_http_server/struct.ServerBuilder.html#method.threads). - /// - /// ## Warning - /// - /// The default config uses multiple threads, which disables RPC port conflict detection. - /// This can allow multiple Zebra instances to share the same RPC port. - /// - /// If some of those instances are outdated or failed, RPC queries can be slow or inconsistent. + /// This field is deprecated and may be removed in a future release. + /// It is kept only for backward compatibility, and it currently does nothing. + /// It was configurable when the RPC server was based on the jsonrpc-core crate, + /// but it has no effect now that Zebra has migrated to jsonrpsee. + // TODO: Prefix this field name with an underscore so it's clear that it's now unused, and + // use serde(rename) to continue successfully deserializing old configs. pub parallel_cpu_threads: usize, /// Test-only option that makes Zebra say it is at the chain tip, diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index cfd8260d2ba..8634ec43ef5 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -9,12 +9,13 @@ use std::{collections::HashSet, fmt::Debug, sync::Arc}; use chrono::Utc; -use futures::{stream::FuturesOrdered, FutureExt, StreamExt, TryFutureExt}; +use futures::{stream::FuturesOrdered, StreamExt, TryFutureExt}; use hex::{FromHex, ToHex}; use hex_data::HexData; use indexmap::IndexMap; -use jsonrpc_core::{self, BoxFuture, Error, ErrorCode, Result}; -use jsonrpc_derive::rpc; +use jsonrpsee::core::{async_trait, RpcResult as Result}; +use jsonrpsee_proc_macros::rpc; +use jsonrpsee_types::{ErrorCode, ErrorObject}; use tokio::{sync::broadcast, task::JoinHandle}; use tower::{Service, ServiceExt}; use tracing::Instrument; @@ -56,7 +57,7 @@ pub mod types; pub mod get_block_template_rpcs; #[cfg(feature = "getblocktemplate-rpcs")] -pub use get_block_template_rpcs::{GetBlockTemplateRpc, GetBlockTemplateRpcImpl}; +pub use get_block_template_rpcs::{GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer}; #[cfg(test)] mod tests; @@ -64,7 +65,6 @@ mod tests; #[rpc(server)] /// RPC method signatures. pub trait Rpc { - #[rpc(name = "getinfo")] /// Returns software information from the RPC server, as a [`GetInfo`] JSON struct.
/// /// zcashd reference: [`getinfo`](https://zcash.github.io/rpc/getinfo.html) @@ -79,6 +79,7 @@ pub trait Rpc { /// /// Some fields from the zcashd reference are missing from Zebra's [`GetInfo`]. It only contains the fields /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L91-L95) + #[method(name = "getinfo")] fn get_info(&self) -> Result<GetInfo>; /// Returns blockchain state information, as a [`GetBlockChainInfo`] JSON struct. @@ -91,8 +92,8 @@ /// /// Some fields from the zcashd reference are missing from Zebra's [`GetBlockChainInfo`]. It only contains the fields /// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L72-L89) - #[rpc(name = "getblockchaininfo")] - fn get_blockchain_info(&self) -> BoxFuture<Result<GetBlockChainInfo>>; + #[method(name = "getblockchaininfo")] + async fn get_blockchain_info(&self) -> Result<GetBlockChainInfo>; /// Returns the total balance of a provided `addresses` in an [`AddressBalance`] instance. /// @@ -116,11 +117,8 @@ /// The RPC documentation says that the returned object has a string `balance` field, but /// zcashd actually [returns an /// integer](https://github.com/zcash/lightwalletd/blob/bdaac63f3ee0dbef62bde04f6817a9f90d483b00/common/common.go#L128-L130). - #[rpc(name = "getaddressbalance")] - fn get_address_balance( - &self, - address_strings: AddressStrings, - ) -> BoxFuture<Result<AddressBalance>>; + #[method(name = "getaddressbalance")] + async fn get_address_balance(&self, address_strings: AddressStrings) -> Result<AddressBalance>; /// Sends the raw bytes of a signed transaction to the local node's mempool, if the transaction is valid. /// Returns the [`SentTransactionHash`] for the transaction, as a JSON string. /// @@ -137,11 +135,11 @@ /// /// zcashd accepts an optional `allowhighfees` parameter. Zebra doesn't support this parameter, /// because lightwalletd doesn't use it. - #[rpc(name = "sendrawtransaction")] - fn send_raw_transaction( + #[method(name = "sendrawtransaction")] + async fn send_raw_transaction( &self, raw_transaction_hex: String, - ) -> BoxFuture<Result<SentTransactionHash>>; + ) -> Result<SentTransactionHash>; /// Returns the requested block by hash or height, as a [`GetBlock`] JSON string. /// If the block is not in Zebra's state, returns /// @@ -167,12 +165,8 @@ /// use verbosity=3. /// /// The undocumented `chainwork` field is not returned. - #[rpc(name = "getblock")] - fn get_block( - &self, - hash_or_height: String, - verbosity: Option<u8>, - ) -> BoxFuture<Result<GetBlock>>; + #[method(name = "getblock")] + async fn get_block(&self, hash_or_height: String, verbosity: Option<u8>) -> Result<GetBlock>; /// Returns the requested block header by hash or height, as a [`GetBlockHeader`] JSON string. /// If the block is not in Zebra's state, /// @@ -191,19 +185,19 @@ /// # Notes /// /// The undocumented `chainwork` field is not returned. - #[rpc(name = "getblockheader")] - fn get_block_header( + #[method(name = "getblockheader")] + async fn get_block_header( &self, hash_or_height: String, verbose: Option<bool>, - ) -> BoxFuture<Result<GetBlockHeader>>; + ) -> Result<GetBlockHeader>; /// Returns the hash of the current best blockchain tip block, as a [`GetBlockHash`] JSON string. /// /// zcashd reference: [`getbestblockhash`](https://zcash.github.io/rpc/getbestblockhash.html) /// method: post /// tags: blockchain - #[rpc(name = "getbestblockhash")] + #[method(name = "getbestblockhash")] fn get_best_block_hash(&self) -> Result<GetBlockHash>; /// Returns the height and hash of the current best blockchain tip block, as a [`GetBlockHeightAndHash`] JSON struct.
@@ -211,7 +205,7 @@ /// zcashd reference: none /// method: post /// tags: blockchain - #[rpc(name = "getbestblockheightandhash")] + #[method(name = "getbestblockheightandhash")] fn get_best_block_height_and_hash(&self) -> Result<GetBlockHeightAndHash>; /// Returns all transaction ids in the memory pool, as a JSON array. @@ -219,8 +213,8 @@ /// zcashd reference: [`getrawmempool`](https://zcash.github.io/rpc/getrawmempool.html) /// method: post /// tags: blockchain - #[rpc(name = "getrawmempool")] - fn get_raw_mempool(&self) -> BoxFuture<Result<Vec<String>>>; + #[method(name = "getrawmempool")] + async fn get_raw_mempool(&self) -> Result<Vec<String>>; /// Returns information about the given block's Sapling & Orchard tree state. /// @@ -238,8 +232,8 @@ /// negative where -1 is the last known valid block". On the other hand, /// `lightwalletd` only uses positive heights, so Zebra does not support /// negative heights. - #[rpc(name = "z_gettreestate")] - fn z_get_treestate(&self, hash_or_height: String) -> BoxFuture<Result<GetTreestate>>; + #[method(name = "z_gettreestate")] + async fn z_get_treestate(&self, hash_or_height: String) -> Result<GetTreestate>; /// Returns information about a range of Sapling or Orchard subtrees. /// @@ -259,13 +253,13 @@ /// starting at the chain tip. This RPC will return an empty list if the `start_index` subtree /// exists, but has not been rebuilt yet. This matches `zcashd`'s behaviour when subtrees aren't /// available yet. (But `zcashd` does its rebuild before syncing any blocks.) - #[rpc(name = "z_getsubtreesbyindex")] - fn z_get_subtrees_by_index( + #[method(name = "z_getsubtreesbyindex")] + async fn z_get_subtrees_by_index( &self, pool: String, start_index: NoteCommitmentSubtreeIndex, limit: Option<NoteCommitmentSubtreeIndex>, - ) -> BoxFuture<Result<GetSubtrees>>; + ) -> Result<GetSubtrees>; /// Returns the raw transaction data, as a [`GetRawTransaction`] JSON string or structure. /// @@ -286,12 +280,12 @@ /// In verbose mode, we only expose the `hex` and `height` fields since /// lightwalletd uses only those: /// - #[rpc(name = "getrawtransaction")] - fn get_raw_transaction( + #[method(name = "getrawtransaction")] + async fn get_raw_transaction( &self, txid: String, verbose: Option<u8>, - ) -> BoxFuture<Result<GetRawTransaction>>; + ) -> Result<GetRawTransaction>; /// Returns the transaction ids made by the provided transparent addresses. /// @@ -310,9 +304,8 @@ /// /// Only the multi-argument format is used by lightwalletd and this is what we currently support: /// - #[rpc(name = "getaddresstxids")] - fn get_address_tx_ids(&self, request: GetAddressTxIdsRequest) - -> BoxFuture<Result<Vec<String>>>; + #[method(name = "getaddresstxids")] + async fn get_address_tx_ids(&self, request: GetAddressTxIdsRequest) -> Result<Vec<String>>; /// Returns all unspent outputs for a list of addresses. /// @@ -328,11 +321,11 @@ /// /// lightwalletd always uses the multi-address request, without chaininfo: /// - #[rpc(name = "getaddressutxos")] - fn get_address_utxos( + #[method(name = "getaddressutxos")] + async fn get_address_utxos( &self, address_strings: AddressStrings, - ) -> BoxFuture<Result<Vec<GetAddressUtxos>>>; + ) -> Result<Vec<GetAddressUtxos>>; /// Stop the running zebrad process.
/// @@ -344,7 +337,7 @@ pub trait Rpc { /// zcashd reference: [`stop`](https://zcash.github.io/rpc/stop.html) /// method: post /// tags: control - #[rpc(name = "stop")] + #[method(name = "stop")] fn stop(&self) -> Result; } @@ -516,7 +509,8 @@ where } } -impl Rpc for RpcImpl +#[async_trait] +impl RpcServer for RpcImpl where Mempool: Service< mempool::Request, @@ -548,198 +542,186 @@ where } #[allow(clippy::unwrap_in_result)] - fn get_blockchain_info(&self) -> BoxFuture> { + async fn get_blockchain_info(&self) -> Result { let network = self.network.clone(); let debug_force_finished_sync = self.debug_force_finished_sync; let mut state = self.state.clone(); - async move { - // `chain` field - let chain = network.bip70_network_name(); - - let request = zebra_state::ReadRequest::TipPoolValues; - let response: zebra_state::ReadResponse = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - let zebra_state::ReadResponse::TipPoolValues { - tip_height, - tip_hash, - value_balance, - } = response - else { - unreachable!("unmatched response to a TipPoolValues request") - }; - - let request = zebra_state::ReadRequest::BlockHeader(tip_hash.into()); - let response: zebra_state::ReadResponse = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - let zebra_state::ReadResponse::BlockHeader { header, .. } = response else { - unreachable!("unmatched response to a BlockHeader request") - }; + // `chain` field + let chain = network.bip70_network_name(); + + let request = zebra_state::ReadRequest::TipPoolValues; + let response: zebra_state::ReadResponse = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + + let zebra_state::ReadResponse::TipPoolValues { + tip_height, + tip_hash, + value_balance, + } = response + else { + unreachable!("unmatched response to a TipPoolValues request") + }; - let tip_block_time = header.time; + let request = zebra_state::ReadRequest::BlockHeader(tip_hash.into()); + let response: zebra_state::ReadResponse = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; - let now = Utc::now(); - let zebra_estimated_height = - NetworkChainTipHeightEstimator::new(tip_block_time, tip_height, &network) - .estimate_height_at(now); + let zebra_state::ReadResponse::BlockHeader { header, .. } = response else { + unreachable!("unmatched response to a BlockHeader request") + }; - // If we're testing the mempool, force the estimated height to be the actual tip height, otherwise, - // check if the estimated height is below Zebra's latest tip height, or if the latest tip's block time is - // later than the current time on the local clock. - let estimated_height = if tip_block_time > now - || zebra_estimated_height < tip_height - || debug_force_finished_sync - { - tip_height - } else { - zebra_estimated_height - }; + let tip_block_time = header.time; + + let now = Utc::now(); + let zebra_estimated_height = + NetworkChainTipHeightEstimator::new(tip_block_time, tip_height, &network) + .estimate_height_at(now); + + // If we're testing the mempool, force the estimated height to be the actual tip height, otherwise, + // check if the estimated height is below Zebra's latest tip height, or if the latest tip's block time is + // later than the current time on the local clock. 
+ let estimated_height = if tip_block_time > now + || zebra_estimated_height < tip_height + || debug_force_finished_sync + { + tip_height + } else { + zebra_estimated_height + }; - // `upgrades` object + // `upgrades` object + // + // Get the network upgrades in height order, like `zcashd`. + let mut upgrades = IndexMap::new(); + for (activation_height, network_upgrade) in network.full_activation_list() { + // Zebra defines network upgrades based on incompatible consensus rule changes, + // but zcashd defines them based on ZIPs. // - // Get the network upgrades in height order, like `zcashd`. - let mut upgrades = IndexMap::new(); - for (activation_height, network_upgrade) in network.full_activation_list() { - // Zebra defines network upgrades based on incompatible consensus rule changes, - // but zcashd defines them based on ZIPs. - // - // All the network upgrades with a consensus branch ID are the same in Zebra and zcashd. - if let Some(branch_id) = network_upgrade.branch_id() { - // zcashd's RPC seems to ignore Disabled network upgrades, so Zebra does too. - let status = if tip_height >= activation_height { - NetworkUpgradeStatus::Active - } else { - NetworkUpgradeStatus::Pending - }; - - let upgrade = NetworkUpgradeInfo { - name: network_upgrade, - activation_height, - status, - }; - upgrades.insert(ConsensusBranchIdHex(branch_id), upgrade); - } + // All the network upgrades with a consensus branch ID are the same in Zebra and zcashd. + if let Some(branch_id) = network_upgrade.branch_id() { + // zcashd's RPC seems to ignore Disabled network upgrades, so Zebra does too. + let status = if tip_height >= activation_height { + NetworkUpgradeStatus::Active + } else { + NetworkUpgradeStatus::Pending + }; + + let upgrade = NetworkUpgradeInfo { + name: network_upgrade, + activation_height, + status, + }; + upgrades.insert(ConsensusBranchIdHex(branch_id), upgrade); } + } - // `consensus` object - let next_block_height = - (tip_height + 1).expect("valid chain tips are a lot less than Height::MAX"); - let consensus = TipConsensusBranch { - chain_tip: ConsensusBranchIdHex( - NetworkUpgrade::current(&network, tip_height) - .branch_id() - .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), - ), - next_block: ConsensusBranchIdHex( - NetworkUpgrade::current(&network, next_block_height) - .branch_id() - .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), - ), - }; + // `consensus` object + let next_block_height = + (tip_height + 1).expect("valid chain tips are a lot less than Height::MAX"); + let consensus = TipConsensusBranch { + chain_tip: ConsensusBranchIdHex( + NetworkUpgrade::current(&network, tip_height) + .branch_id() + .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), + ), + next_block: ConsensusBranchIdHex( + NetworkUpgrade::current(&network, next_block_height) + .branch_id() + .unwrap_or(ConsensusBranchId::RPC_MISSING_ID), + ), + }; - let response = GetBlockChainInfo { - chain, - blocks: tip_height, - best_block_hash: tip_hash, - estimated_height, - value_pools: types::ValuePoolBalance::from_value_balance(value_balance), - upgrades, - consensus, - }; + let response = GetBlockChainInfo { + chain, + blocks: tip_height, + best_block_hash: tip_hash, + estimated_height, + value_pools: types::ValuePoolBalance::from_value_balance(value_balance), + upgrades, + consensus, + }; - Ok(response) - } - .boxed() + Ok(response) } - fn get_address_balance( - &self, - address_strings: AddressStrings, - ) -> BoxFuture> { + async fn get_address_balance(&self, address_strings: AddressStrings) -> Result { let state = 
self.state.clone(); - async move { - let valid_addresses = address_strings.valid_addresses()?; + let valid_addresses = address_strings.valid_addresses()?; - let request = zebra_state::ReadRequest::AddressBalance(valid_addresses); - let response = state.oneshot(request).await.map_misc_error()?; + let request = zebra_state::ReadRequest::AddressBalance(valid_addresses); + let response = state.oneshot(request).await.map_misc_error()?; - match response { - zebra_state::ReadResponse::AddressBalance(balance) => Ok(AddressBalance { - balance: u64::from(balance), - }), - _ => unreachable!("Unexpected response from state service: {response:?}"), - } + match response { + zebra_state::ReadResponse::AddressBalance(balance) => Ok(AddressBalance { + balance: u64::from(balance), + }), + _ => unreachable!("Unexpected response from state service: {response:?}"), } - .boxed() } // TODO: use HexData or GetRawTransaction::Bytes to handle the transaction data argument - fn send_raw_transaction( + async fn send_raw_transaction( &self, raw_transaction_hex: String, - ) -> BoxFuture> { + ) -> Result { let mempool = self.mempool.clone(); let queue_sender = self.queue_sender.clone(); - async move { - // Reference for the legacy error code: - // - let raw_transaction_bytes = Vec::from_hex(raw_transaction_hex) - .map_error(server::error::LegacyCode::Deserialization)?; - let raw_transaction = Transaction::zcash_deserialize(&*raw_transaction_bytes) - .map_error(server::error::LegacyCode::Deserialization)?; + // Reference for the legacy error code: + // + let raw_transaction_bytes = Vec::from_hex(raw_transaction_hex) + .map_error(server::error::LegacyCode::Deserialization)?; + let raw_transaction = Transaction::zcash_deserialize(&*raw_transaction_bytes) + .map_error(server::error::LegacyCode::Deserialization)?; - let transaction_hash = raw_transaction.hash(); + let transaction_hash = raw_transaction.hash(); - // send transaction to the rpc queue, ignore any error. - let unmined_transaction = UnminedTx::from(raw_transaction.clone()); - let _ = queue_sender.send(unmined_transaction); + // send transaction to the rpc queue, ignore any error. + let unmined_transaction = UnminedTx::from(raw_transaction.clone()); + let _ = queue_sender.send(unmined_transaction); - let transaction_parameter = mempool::Gossip::Tx(raw_transaction.into()); - let request = mempool::Request::Queue(vec![transaction_parameter]); + let transaction_parameter = mempool::Gossip::Tx(raw_transaction.into()); + let request = mempool::Request::Queue(vec![transaction_parameter]); - let response = mempool.oneshot(request).await.map_misc_error()?; + let response = mempool.oneshot(request).await.map_misc_error()?; - let mut queue_results = match response { - mempool::Response::Queued(results) => results, - _ => unreachable!("incorrect response variant from mempool service"), - }; + let mut queue_results = match response { + mempool::Response::Queued(results) => results, + _ => unreachable!("incorrect response variant from mempool service"), + }; - assert_eq!( - queue_results.len(), - 1, - "mempool service returned more results than expected" - ); + assert_eq!( + queue_results.len(), + 1, + "mempool service returned more results than expected" + ); - let queue_result = queue_results - .pop() - .expect("there should be exactly one item in Vec") - .inspect_err(|err| tracing::debug!("sent transaction to mempool: {:?}", &err)) - .map_misc_error()? 
- .await - .map_misc_error()?; + let queue_result = queue_results + .pop() + .expect("there should be exactly one item in Vec") + .inspect_err(|err| tracing::debug!("sent transaction to mempool: {:?}", &err)) + .map_misc_error()? + .await + .map_misc_error()?; - tracing::debug!("sent transaction to mempool: {:?}", &queue_result); + tracing::debug!("sent transaction to mempool: {:?}", &queue_result); - queue_result - .map(|_| SentTransactionHash(transaction_hash)) - // Reference for the legacy error code: - // - // Note that this error code might not exactly match the one returned by zcashd - // since zcashd's error code selection logic is more granular. We'd need to - // propagate the error coming from the verifier to be able to return more specific - // error codes. - .map_error(server::error::LegacyCode::Verify) - } - .boxed() + queue_result + .map(|_| SentTransactionHash(transaction_hash)) + // Reference for the legacy error code: + // + // Note that this error code might not exactly match the one returned by zcashd + // since zcashd's error code selection logic is more granular. We'd need to + // propagate the error coming from the verifier to be able to return more specific + // error codes. + .map_error(server::error::LegacyCode::Verify) } // # Performance @@ -750,11 +732,7 @@ where // TODO: // - use `height_from_signed_int()` to handle negative heights // (this might be better in the state request, because it needs the state height) - fn get_block( - &self, - hash_or_height: String, - verbosity: Option, - ) -> BoxFuture> { + async fn get_block(&self, hash_or_height: String, verbosity: Option) -> Result { let mut state = self.state.clone(); let verbosity = verbosity.unwrap_or(1); let network = self.network.clone(); @@ -767,277 +745,269 @@ where None }; - async move { - let hash_or_height: HashOrHeight = hash_or_height - .parse() - // Reference for the legacy error code: - // - .map_error(server::error::LegacyCode::InvalidParameter)?; - - if verbosity == 0 { - let request = zebra_state::ReadRequest::Block(hash_or_height); - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - match response { - zebra_state::ReadResponse::Block(Some(block)) => { - Ok(GetBlock::Raw(block.into())) - } - zebra_state::ReadResponse::Block(None) => Err("Block not found") - .map_error(server::error::LegacyCode::InvalidParameter), - _ => unreachable!("unmatched response to a block request"), + let hash_or_height: HashOrHeight = hash_or_height + .parse() + // Reference for the legacy error code: + // + .map_error(server::error::LegacyCode::InvalidParameter)?; + + if verbosity == 0 { + let request = zebra_state::ReadRequest::Block(hash_or_height); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + + match response { + zebra_state::ReadResponse::Block(Some(block)) => Ok(GetBlock::Raw(block.into())), + zebra_state::ReadResponse::Block(None) => { + Err("Block not found").map_error(server::error::LegacyCode::InvalidParameter) } - } else if let Some(get_block_header_future) = get_block_header_future { - let get_block_header_result: Result = get_block_header_future.await; + _ => unreachable!("unmatched response to a block request"), + } + } else if let Some(get_block_header_future) = get_block_header_future { + let get_block_header_result: Result = get_block_header_future.await; - let GetBlockHeader::Object(block_header) = get_block_header_result? 
else { - panic!("must return Object") - }; + let GetBlockHeader::Object(block_header) = get_block_header_result? else { + panic!("must return Object") + }; - let GetBlockHeaderObject { - hash, - confirmations, - height, - version, - merkle_root, - final_sapling_root, - sapling_tree_size, - time, - nonce, - solution, - bits, - difficulty, - previous_block_hash, - next_block_hash, - } = *block_header; - - let transactions_request = match verbosity { - 1 => zebra_state::ReadRequest::TransactionIdsForBlock(hash_or_height), - 2 => zebra_state::ReadRequest::Block(hash_or_height), - _other => panic!("get_block_header_fut should be none"), - }; + let GetBlockHeaderObject { + hash, + confirmations, + height, + version, + merkle_root, + final_sapling_root, + sapling_tree_size, + time, + nonce, + solution, + bits, + difficulty, + previous_block_hash, + next_block_hash, + } = *block_header; + + let transactions_request = match verbosity { + 1 => zebra_state::ReadRequest::TransactionIdsForBlock(hash_or_height), + 2 => zebra_state::ReadRequest::Block(hash_or_height), + _other => panic!("get_block_header_fut should be none"), + }; + // # Concurrency + // + // We look up by block hash so the hash, transaction IDs, and confirmations + // are consistent. + let hash_or_height = hash.0.into(); + let requests = vec![ + // Get transaction IDs from the transaction index by block hash + // // # Concurrency // - // We look up by block hash so the hash, transaction IDs, and confirmations - // are consistent. - let hash_or_height = hash.0.into(); - let requests = vec![ - // Get transaction IDs from the transaction index by block hash - // - // # Concurrency - // - // A block's transaction IDs are never modified, so all possible responses are - // valid. Clients that query block heights must be able to handle chain forks, - // including getting transaction IDs from any chain fork. - transactions_request, - // Orchard trees - zebra_state::ReadRequest::OrchardTree(hash_or_height), - ]; - - let mut futs = FuturesOrdered::new(); - - for request in requests { - futs.push_back(state.clone().oneshot(request)); - } + // A block's transaction IDs are never modified, so all possible responses are + // valid. Clients that query block heights must be able to handle chain forks, + // including getting transaction IDs from any chain fork. + transactions_request, + // Orchard trees + zebra_state::ReadRequest::OrchardTree(hash_or_height), + ]; + + let mut futs = FuturesOrdered::new(); + + for request in requests { + futs.push_back(state.clone().oneshot(request)); + } - let tx_ids_response = futs.next().await.expect("`futs` should not be empty"); - let tx: Vec<_> = match tx_ids_response.map_misc_error()? { - zebra_state::ReadResponse::TransactionIdsForBlock(tx_ids) => tx_ids - .ok_or_misc_error("block not found")? - .iter() - .map(|tx_id| GetBlockTransaction::Hash(*tx_id)) - .collect(), - zebra_state::ReadResponse::Block(block) => block - .ok_or_misc_error("Block not found")? - .transactions - .iter() - .map(|tx| { - GetBlockTransaction::Object(TransactionObject::from_transaction( - tx.clone(), - Some(height), - Some( - confirmations - .try_into() - .expect("should be less than max block height, i32::MAX"), - ), - )) - }) - .collect(), - _ => unreachable!("unmatched response to a transaction_ids_for_block request"), - }; + let tx_ids_response = futs.next().await.expect("`futs` should not be empty"); + let tx: Vec<_> = match tx_ids_response.map_misc_error()? 
{ + zebra_state::ReadResponse::TransactionIdsForBlock(tx_ids) => tx_ids + .ok_or_misc_error("block not found")? + .iter() + .map(|tx_id| GetBlockTransaction::Hash(*tx_id)) + .collect(), + zebra_state::ReadResponse::Block(block) => block + .ok_or_misc_error("Block not found")? + .transactions + .iter() + .map(|tx| { + GetBlockTransaction::Object(TransactionObject::from_transaction( + tx.clone(), + Some(height), + Some( + confirmations + .try_into() + .expect("should be less than max block height, i32::MAX"), + ), + )) + }) + .collect(), + _ => unreachable!("unmatched response to a transaction_ids_for_block request"), + }; - let orchard_tree_response = futs.next().await.expect("`futs` should not be empty"); - let zebra_state::ReadResponse::OrchardTree(orchard_tree) = - orchard_tree_response.map_misc_error()? - else { - unreachable!("unmatched response to a OrchardTree request"); - }; + let orchard_tree_response = futs.next().await.expect("`futs` should not be empty"); + let zebra_state::ReadResponse::OrchardTree(orchard_tree) = + orchard_tree_response.map_misc_error()? + else { + unreachable!("unmatched response to a OrchardTree request"); + }; - let nu5_activation = NetworkUpgrade::Nu5.activation_height(&network); + let nu5_activation = NetworkUpgrade::Nu5.activation_height(&network); - // This could be `None` if there's a chain reorg between state queries. - let orchard_tree = orchard_tree.ok_or_misc_error("missing Orchard tree")?; + // This could be `None` if there's a chain reorg between state queries. + let orchard_tree = orchard_tree.ok_or_misc_error("missing Orchard tree")?; - let final_orchard_root = match nu5_activation { - Some(activation_height) if height >= activation_height => { - Some(orchard_tree.root().into()) - } - _other => None, - }; + let final_orchard_root = match nu5_activation { + Some(activation_height) if height >= activation_height => { + Some(orchard_tree.root().into()) + } + _other => None, + }; - let sapling = SaplingTrees { - size: sapling_tree_size, - }; + let sapling = SaplingTrees { + size: sapling_tree_size, + }; - let orchard_tree_size = orchard_tree.count(); - let orchard = OrchardTrees { - size: orchard_tree_size, - }; + let orchard_tree_size = orchard_tree.count(); + let orchard = OrchardTrees { + size: orchard_tree_size, + }; - let trees = GetBlockTrees { sapling, orchard }; - - Ok(GetBlock::Object { - hash, - confirmations, - height: Some(height), - version: Some(version), - merkle_root: Some(merkle_root), - time: Some(time), - nonce: Some(nonce), - solution: Some(solution), - bits: Some(bits), - difficulty: Some(difficulty), - tx, - trees, - size: None, - final_sapling_root: Some(final_sapling_root), - final_orchard_root, - previous_block_hash: Some(previous_block_hash), - next_block_hash, - }) - } else { - Err("invalid verbosity value") - .map_error(server::error::LegacyCode::InvalidParameter) - } + let trees = GetBlockTrees { sapling, orchard }; + + Ok(GetBlock::Object { + hash, + confirmations, + height: Some(height), + version: Some(version), + merkle_root: Some(merkle_root), + time: Some(time), + nonce: Some(nonce), + solution: Some(solution), + bits: Some(bits), + difficulty: Some(difficulty), + tx, + trees, + size: None, + final_sapling_root: Some(final_sapling_root), + final_orchard_root, + previous_block_hash: Some(previous_block_hash), + next_block_hash, + }) + } else { + Err("invalid verbosity value").map_error(server::error::LegacyCode::InvalidParameter) } - .boxed() } - fn get_block_header( + async fn get_block_header( &self, 
hash_or_height: String, verbose: Option, - ) -> BoxFuture> { + ) -> Result { let state = self.state.clone(); let verbose = verbose.unwrap_or(true); let network = self.network.clone(); - async move { - let hash_or_height: HashOrHeight = hash_or_height - .parse() - .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; - let zebra_state::ReadResponse::BlockHeader { - header, - hash, - height, - next_block_hash, - } = state + let hash_or_height: HashOrHeight = hash_or_height + .parse() + .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; + let zebra_state::ReadResponse::BlockHeader { + header, + hash, + height, + next_block_hash, + } = state + .clone() + .oneshot(zebra_state::ReadRequest::BlockHeader(hash_or_height)) + .await + .map_err(|_| "block height not in best chain") + .map_error( + // ## Compatibility with `zcashd`. + // + // Since this function is reused by getblock(), we return the errors + // expected by it (they differ whether a hash or a height was passed). + if hash_or_height.hash().is_some() { + server::error::LegacyCode::InvalidAddressOrKey + } else { + server::error::LegacyCode::InvalidParameter + }, + )? + else { + panic!("unexpected response to BlockHeader request") + }; + + let response = if !verbose { + GetBlockHeader::Raw(HexData(header.zcash_serialize_to_vec().map_misc_error()?)) + } else { + let zebra_state::ReadResponse::SaplingTree(sapling_tree) = state .clone() - .oneshot(zebra_state::ReadRequest::BlockHeader(hash_or_height)) + .oneshot(zebra_state::ReadRequest::SaplingTree(hash_or_height)) .await - .map_err(|_| "block height not in best chain") - .map_error( - // ## Compatibility with `zcashd`. - // - // Since this function is reused by getblock(), we return the errors - // expected by it (they differ whether a hash or a height was passed). - if hash_or_height.hash().is_some() { - server::error::LegacyCode::InvalidAddressOrKey - } else { - server::error::LegacyCode::InvalidParameter - }, - )? + .map_misc_error()? else { - panic!("unexpected response to BlockHeader request") + panic!("unexpected response to SaplingTree request") }; - let response = if !verbose { - GetBlockHeader::Raw(HexData(header.zcash_serialize_to_vec().map_misc_error()?)) - } else { - let zebra_state::ReadResponse::SaplingTree(sapling_tree) = state - .clone() - .oneshot(zebra_state::ReadRequest::SaplingTree(hash_or_height)) - .await - .map_misc_error()? - else { - panic!("unexpected response to SaplingTree request") - }; + // This could be `None` if there's a chain reorg between state queries. + let sapling_tree = sapling_tree.ok_or_misc_error("missing Sapling tree")?; - // This could be `None` if there's a chain reorg between state queries. - let sapling_tree = sapling_tree.ok_or_misc_error("missing Sapling tree")?; + let zebra_state::ReadResponse::Depth(depth) = state + .clone() + .oneshot(zebra_state::ReadRequest::Depth(hash)) + .await + .map_misc_error()? + else { + panic!("unexpected response to SaplingTree request") + }; - let zebra_state::ReadResponse::Depth(depth) = state - .clone() - .oneshot(zebra_state::ReadRequest::Depth(hash)) - .await - .map_misc_error()? - else { - panic!("unexpected response to SaplingTree request") + // From + // TODO: Deduplicate const definition, consider refactoring this to avoid duplicate logic + const NOT_IN_BEST_CHAIN_CONFIRMATIONS: i64 = -1; + + // Confirmations are one more than the depth. + // Depth is limited by height, so it will never overflow an i64. 
+ let confirmations = depth + .map(|depth| i64::from(depth) + 1) + .unwrap_or(NOT_IN_BEST_CHAIN_CONFIRMATIONS); + + let mut nonce = *header.nonce; + nonce.reverse(); + + let sapling_activation = NetworkUpgrade::Sapling.activation_height(&network); + let sapling_tree_size = sapling_tree.count(); + let final_sapling_root: [u8; 32] = + if sapling_activation.is_some() && height >= sapling_activation.unwrap() { + let mut root: [u8; 32] = sapling_tree.root().into(); + root.reverse(); + root + } else { + [0; 32] }; - // From - // TODO: Deduplicate const definition, consider refactoring this to avoid duplicate logic - const NOT_IN_BEST_CHAIN_CONFIRMATIONS: i64 = -1; - - // Confirmations are one more than the depth. - // Depth is limited by height, so it will never overflow an i64. - let confirmations = depth - .map(|depth| i64::from(depth) + 1) - .unwrap_or(NOT_IN_BEST_CHAIN_CONFIRMATIONS); - - let mut nonce = *header.nonce; - nonce.reverse(); - - let sapling_activation = NetworkUpgrade::Sapling.activation_height(&network); - let sapling_tree_size = sapling_tree.count(); - let final_sapling_root: [u8; 32] = - if sapling_activation.is_some() && height >= sapling_activation.unwrap() { - let mut root: [u8; 32] = sapling_tree.root().into(); - root.reverse(); - root - } else { - [0; 32] - }; - - let difficulty = header.difficulty_threshold.relative_to_network(&network); - - let block_header = GetBlockHeaderObject { - hash: GetBlockHash(hash), - confirmations, - height, - version: header.version, - merkle_root: header.merkle_root, - final_sapling_root, - sapling_tree_size, - time: header.time.timestamp(), - nonce, - solution: header.solution, - bits: header.difficulty_threshold, - difficulty, - previous_block_hash: GetBlockHash(header.previous_block_hash), - next_block_hash: next_block_hash.map(GetBlockHash), - }; + let difficulty = header.difficulty_threshold.relative_to_network(&network); - GetBlockHeader::Object(Box::new(block_header)) + let block_header = GetBlockHeaderObject { + hash: GetBlockHash(hash), + confirmations, + height, + version: header.version, + merkle_root: header.merkle_root, + final_sapling_root, + sapling_tree_size, + time: header.time.timestamp(), + nonce, + solution: header.solution, + bits: header.difficulty_threshold, + difficulty, + previous_block_hash: GetBlockHash(header.previous_block_hash), + next_block_hash: next_block_hash.map(GetBlockHash), }; - Ok(response) - } - .boxed() + GetBlockHeader::Object(Box::new(block_header)) + }; + + Ok(response) } fn get_best_block_hash(&self) -> Result { @@ -1054,7 +1024,7 @@ where .ok_or_misc_error("No blocks in state") } - fn get_raw_mempool(&self) -> BoxFuture>> { + async fn get_raw_mempool(&self) -> Result> { #[cfg(feature = "getblocktemplate-rpcs")] use zebra_chain::block::MAX_BLOCK_BYTES; @@ -1064,421 +1034,400 @@ where let mut mempool = self.mempool.clone(); - async move { - #[cfg(feature = "getblocktemplate-rpcs")] - let request = if should_use_zcashd_order { - mempool::Request::FullTransactions - } else { - mempool::Request::TransactionIds - }; - - #[cfg(not(feature = "getblocktemplate-rpcs"))] - let request = mempool::Request::TransactionIds; + #[cfg(feature = "getblocktemplate-rpcs")] + let request = if should_use_zcashd_order { + mempool::Request::FullTransactions + } else { + mempool::Request::TransactionIds + }; - // `zcashd` doesn't check if it is synced to the tip here, so we don't either. 
- let response = mempool - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; + #[cfg(not(feature = "getblocktemplate-rpcs"))] + let request = mempool::Request::TransactionIds; - match response { - #[cfg(feature = "getblocktemplate-rpcs")] - mempool::Response::FullTransactions { - mut transactions, - transaction_dependencies: _, - last_seen_tip_hash: _, - } => { - // Sort transactions in descending order by fee/size, using hash in serialized byte order as a tie-breaker - transactions.sort_by_cached_key(|tx| { - // zcashd uses modified fee here but Zebra doesn't currently - // support prioritizing transactions - std::cmp::Reverse(( - i64::from(tx.miner_fee) as u128 * MAX_BLOCK_BYTES as u128 - / tx.transaction.size as u128, - // transaction hashes are compared in their serialized byte-order. - tx.transaction.id.mined_id(), - )) - }); + // `zcashd` doesn't check if it is synced to the tip here, so we don't either. + let response = mempool + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; - let tx_ids: Vec = transactions - .iter() - .map(|unmined_tx| unmined_tx.transaction.id.mined_id().encode_hex()) - .collect(); + match response { + #[cfg(feature = "getblocktemplate-rpcs")] + mempool::Response::FullTransactions { + mut transactions, + transaction_dependencies: _, + last_seen_tip_hash: _, + } => { + // Sort transactions in descending order by fee/size, using hash in serialized byte order as a tie-breaker + transactions.sort_by_cached_key(|tx| { + // zcashd uses modified fee here but Zebra doesn't currently + // support prioritizing transactions + std::cmp::Reverse(( + i64::from(tx.miner_fee) as u128 * MAX_BLOCK_BYTES as u128 + / tx.transaction.size as u128, + // transaction hashes are compared in their serialized byte-order. + tx.transaction.id.mined_id(), + )) + }); - Ok(tx_ids) - } + let tx_ids: Vec = transactions + .iter() + .map(|unmined_tx| unmined_tx.transaction.id.mined_id().encode_hex()) + .collect(); - mempool::Response::TransactionIds(unmined_transaction_ids) => { - let mut tx_ids: Vec = unmined_transaction_ids - .iter() - .map(|id| id.mined_id().encode_hex()) - .collect(); + Ok(tx_ids) + } - // Sort returned transaction IDs in numeric/string order. - tx_ids.sort(); + mempool::Response::TransactionIds(unmined_transaction_ids) => { + let mut tx_ids: Vec = unmined_transaction_ids + .iter() + .map(|id| id.mined_id().encode_hex()) + .collect(); - Ok(tx_ids) - } + // Sort returned transaction IDs in numeric/string order. + tx_ids.sort(); - _ => unreachable!("unmatched response to a transactionids request"), + Ok(tx_ids) } + + _ => unreachable!("unmatched response to a transactionids request"), } - .boxed() } - fn get_raw_transaction( + async fn get_raw_transaction( &self, txid: String, verbose: Option, - ) -> BoxFuture> { + ) -> Result { let mut state = self.state.clone(); let mut mempool = self.mempool.clone(); let verbose = verbose.unwrap_or(0) != 0; - async move { - // Reference for the legacy error code: - // - let txid = transaction::Hash::from_hex(txid) - .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; - - // Check the mempool first. - match mempool - .ready() - .and_then(|service| { - service.call(mempool::Request::TransactionsByMinedId([txid].into())) - }) - .await - .map_misc_error()? 
- { - mempool::Response::Transactions(txns) => { - if let Some(tx) = txns.first() { - return Ok(if verbose { - GetRawTransaction::Object(TransactionObject::from_transaction( - tx.transaction.clone(), - None, - None, - )) - } else { - let hex = tx.transaction.clone().into(); - GetRawTransaction::Raw(hex) - }); - } + // Reference for the legacy error code: + // + let txid = transaction::Hash::from_hex(txid) + .map_error(server::error::LegacyCode::InvalidAddressOrKey)?; + + // Check the mempool first. + match mempool + .ready() + .and_then(|service| { + service.call(mempool::Request::TransactionsByMinedId([txid].into())) + }) + .await + .map_misc_error()? + { + mempool::Response::Transactions(txns) => { + if let Some(tx) = txns.first() { + return Ok(if verbose { + GetRawTransaction::Object(TransactionObject::from_transaction( + tx.transaction.clone(), + None, + None, + )) + } else { + let hex = tx.transaction.clone().into(); + GetRawTransaction::Raw(hex) + }); } + } - _ => unreachable!("unmatched response to a `TransactionsByMinedId` request"), - }; - - // If the tx wasn't in the mempool, check the state. - match state - .ready() - .and_then(|service| service.call(zebra_state::ReadRequest::Transaction(txid))) - .await - .map_misc_error()? - { - zebra_state::ReadResponse::Transaction(Some(tx)) => Ok(if verbose { - GetRawTransaction::Object(TransactionObject::from_transaction( - tx.tx.clone(), - Some(tx.height), - Some(tx.confirmations), - )) - } else { - let hex = tx.tx.into(); - GetRawTransaction::Raw(hex) - }), + _ => unreachable!("unmatched response to a `TransactionsByMinedId` request"), + }; - zebra_state::ReadResponse::Transaction(None) => { - Err("No such mempool or main chain transaction") - .map_error(server::error::LegacyCode::InvalidAddressOrKey) - } + // If the tx wasn't in the mempool, check the state. + match state + .ready() + .and_then(|service| service.call(zebra_state::ReadRequest::Transaction(txid))) + .await + .map_misc_error()? + { + zebra_state::ReadResponse::Transaction(Some(tx)) => Ok(if verbose { + GetRawTransaction::Object(TransactionObject::from_transaction( + tx.tx.clone(), + Some(tx.height), + Some(tx.confirmations), + )) + } else { + let hex = tx.tx.into(); + GetRawTransaction::Raw(hex) + }), - _ => unreachable!("unmatched response to a `Transaction` read request"), + zebra_state::ReadResponse::Transaction(None) => { + Err("No such mempool or main chain transaction") + .map_error(server::error::LegacyCode::InvalidAddressOrKey) } + + _ => unreachable!("unmatched response to a `Transaction` read request"), } - .boxed() } // TODO: // - use `height_from_signed_int()` to handle negative heights // (this might be better in the state request, because it needs the state height) - fn z_get_treestate(&self, hash_or_height: String) -> BoxFuture> { + async fn z_get_treestate(&self, hash_or_height: String) -> Result { let mut state = self.state.clone(); let network = self.network.clone(); - async move { - // Reference for the legacy error code: - // - let hash_or_height = hash_or_height - .parse() - .map_error(server::error::LegacyCode::InvalidParameter)?; + // Reference for the legacy error code: + // + let hash_or_height = hash_or_height + .parse() + .map_error(server::error::LegacyCode::InvalidParameter)?; - // Fetch the block referenced by [`hash_or_height`] from the state. - // - // # Concurrency - // - // For consistency, this lookup must be performed first, then all the other lookups must - // be based on the hash. 
- // - // TODO: If this RPC is called a lot, just get the block header, rather than the whole block. - let block = match state - .ready() - .and_then(|service| service.call(zebra_state::ReadRequest::Block(hash_or_height))) - .await - .map_misc_error()? - { - zebra_state::ReadResponse::Block(Some(block)) => block, - zebra_state::ReadResponse::Block(None) => { - // Reference for the legacy error code: - // - return Err("the requested block is not in the main chain") - .map_error(server::error::LegacyCode::InvalidParameter); - } - _ => unreachable!("unmatched response to a block request"), - }; + // Fetch the block referenced by [`hash_or_height`] from the state. + // + // # Concurrency + // + // For consistency, this lookup must be performed first, then all the other lookups must + // be based on the hash. + // + // TODO: If this RPC is called a lot, just get the block header, rather than the whole block. + let block = match state + .ready() + .and_then(|service| service.call(zebra_state::ReadRequest::Block(hash_or_height))) + .await + .map_misc_error()? + { + zebra_state::ReadResponse::Block(Some(block)) => block, + zebra_state::ReadResponse::Block(None) => { + // Reference for the legacy error code: + // + return Err("the requested block is not in the main chain") + .map_error(server::error::LegacyCode::InvalidParameter); + } + _ => unreachable!("unmatched response to a block request"), + }; - let hash = hash_or_height - .hash_or_else(|_| Some(block.hash())) - .expect("block hash"); + let hash = hash_or_height + .hash_or_else(|_| Some(block.hash())) + .expect("block hash"); - let height = hash_or_height - .height_or_else(|_| block.coinbase_height()) - .expect("verified blocks have a coinbase height"); + let height = hash_or_height + .height_or_else(|_| block.coinbase_height()) + .expect("verified blocks have a coinbase height"); - let time = u32::try_from(block.header.time.timestamp()) - .expect("Timestamps of valid blocks always fit into u32."); + let time = u32::try_from(block.header.time.timestamp()) + .expect("Timestamps of valid blocks always fit into u32."); - let sapling_nu = zcash_primitives::consensus::NetworkUpgrade::Sapling; - let sapling = if network.is_nu_active(sapling_nu, height.into()) { - match state - .ready() - .and_then(|service| { - service.call(zebra_state::ReadRequest::SaplingTree(hash.into())) - }) - .await - .map_misc_error()? - { - zebra_state::ReadResponse::SaplingTree(tree) => tree.map(|t| t.to_rpc_bytes()), - _ => unreachable!("unmatched response to a Sapling tree request"), - } - } else { - None - }; + let sapling_nu = zcash_primitives::consensus::NetworkUpgrade::Sapling; + let sapling = if network.is_nu_active(sapling_nu, height.into()) { + match state + .ready() + .and_then(|service| { + service.call(zebra_state::ReadRequest::SaplingTree(hash.into())) + }) + .await + .map_misc_error()? + { + zebra_state::ReadResponse::SaplingTree(tree) => tree.map(|t| t.to_rpc_bytes()), + _ => unreachable!("unmatched response to a Sapling tree request"), + } + } else { + None + }; - let orchard_nu = zcash_primitives::consensus::NetworkUpgrade::Nu5; - let orchard = if network.is_nu_active(orchard_nu, height.into()) { - match state - .ready() - .and_then(|service| { - service.call(zebra_state::ReadRequest::OrchardTree(hash.into())) - }) - .await - .map_misc_error()? 
- { - zebra_state::ReadResponse::OrchardTree(tree) => tree.map(|t| t.to_rpc_bytes()), - _ => unreachable!("unmatched response to an Orchard tree request"), - } - } else { - None - }; + let orchard_nu = zcash_primitives::consensus::NetworkUpgrade::Nu5; + let orchard = if network.is_nu_active(orchard_nu, height.into()) { + match state + .ready() + .and_then(|service| { + service.call(zebra_state::ReadRequest::OrchardTree(hash.into())) + }) + .await + .map_misc_error()? + { + zebra_state::ReadResponse::OrchardTree(tree) => tree.map(|t| t.to_rpc_bytes()), + _ => unreachable!("unmatched response to an Orchard tree request"), + } + } else { + None + }; - Ok(GetTreestate::from_parts( - hash, height, time, sapling, orchard, - )) - } - .boxed() + Ok(GetTreestate::from_parts( + hash, height, time, sapling, orchard, + )) } - fn z_get_subtrees_by_index( + async fn z_get_subtrees_by_index( &self, pool: String, start_index: NoteCommitmentSubtreeIndex, limit: Option, - ) -> BoxFuture> { + ) -> Result { let mut state = self.state.clone(); - async move { - const POOL_LIST: &[&str] = &["sapling", "orchard"]; + const POOL_LIST: &[&str] = &["sapling", "orchard"]; - if pool == "sapling" { - let request = zebra_state::ReadRequest::SaplingSubtrees { start_index, limit }; - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - let subtrees = match response { - zebra_state::ReadResponse::SaplingSubtrees(subtrees) => subtrees, - _ => unreachable!("unmatched response to a subtrees request"), - }; + if pool == "sapling" { + let request = zebra_state::ReadRequest::SaplingSubtrees { start_index, limit }; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; - let subtrees = subtrees - .values() - .map(|subtree| SubtreeRpcData { - root: subtree.root.encode_hex(), - end_height: subtree.end_height, - }) - .collect(); + let subtrees = match response { + zebra_state::ReadResponse::SaplingSubtrees(subtrees) => subtrees, + _ => unreachable!("unmatched response to a subtrees request"), + }; - Ok(GetSubtrees { - pool, - start_index, - subtrees, + let subtrees = subtrees + .values() + .map(|subtree| SubtreeRpcData { + root: subtree.root.encode_hex(), + end_height: subtree.end_height, }) - } else if pool == "orchard" { - let request = zebra_state::ReadRequest::OrchardSubtrees { start_index, limit }; - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - - let subtrees = match response { - zebra_state::ReadResponse::OrchardSubtrees(subtrees) => subtrees, - _ => unreachable!("unmatched response to a subtrees request"), - }; + .collect(); - let subtrees = subtrees - .values() - .map(|subtree| SubtreeRpcData { - root: subtree.root.encode_hex(), - end_height: subtree.end_height, - }) - .collect(); + Ok(GetSubtrees { + pool, + start_index, + subtrees, + }) + } else if pool == "orchard" { + let request = zebra_state::ReadRequest::OrchardSubtrees { start_index, limit }; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; - Ok(GetSubtrees { - pool, - start_index, - subtrees, - }) - } else { - Err(Error { - code: server::error::LegacyCode::Misc.into(), - message: format!("invalid pool name, must be one of: {:?}", POOL_LIST), - data: None, + let subtrees = match response { + zebra_state::ReadResponse::OrchardSubtrees(subtrees) => subtrees, + _ => unreachable!("unmatched response to a subtrees request"), + }; + + let 
subtrees = subtrees + .values() + .map(|subtree| SubtreeRpcData { + root: subtree.root.encode_hex(), + end_height: subtree.end_height, }) - } + .collect(); + + Ok(GetSubtrees { + pool, + start_index, + subtrees, + }) + } else { + Err(ErrorObject::owned( + server::error::LegacyCode::Misc.into(), + format!("invalid pool name, must be one of: {:?}", POOL_LIST).as_str(), + None::<()>, + )) } - .boxed() } - fn get_address_tx_ids( - &self, - request: GetAddressTxIdsRequest, - ) -> BoxFuture>> { + async fn get_address_tx_ids(&self, request: GetAddressTxIdsRequest) -> Result> { let mut state = self.state.clone(); let latest_chain_tip = self.latest_chain_tip.clone(); let start = Height(request.start); let end = Height(request.end); - async move { - let chain_height = best_chain_tip_height(&latest_chain_tip)?; - - // height range checks - check_height_range(start, end, chain_height)?; + let chain_height = best_chain_tip_height(&latest_chain_tip)?; - let valid_addresses = AddressStrings { - addresses: request.addresses, - } - .valid_addresses()?; + // height range checks + check_height_range(start, end, chain_height)?; - let request = zebra_state::ReadRequest::TransactionIdsByAddresses { - addresses: valid_addresses, - height_range: start..=end, - }; - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; + let valid_addresses = AddressStrings { + addresses: request.addresses, + } + .valid_addresses()?; - let hashes = match response { - zebra_state::ReadResponse::AddressesTransactionIds(hashes) => { - let mut last_tx_location = TransactionLocation::from_usize(Height(0), 0); - - hashes - .iter() - .map(|(tx_loc, tx_id)| { - // Check that the returned transactions are in chain order. - assert!( - *tx_loc > last_tx_location, - "Transactions were not in chain order:\n\ + let request = zebra_state::ReadRequest::TransactionIdsByAddresses { + addresses: valid_addresses, + height_range: start..=end, + }; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + + let hashes = match response { + zebra_state::ReadResponse::AddressesTransactionIds(hashes) => { + let mut last_tx_location = TransactionLocation::from_usize(Height(0), 0); + + hashes + .iter() + .map(|(tx_loc, tx_id)| { + // Check that the returned transactions are in chain order. 
+ assert!( + *tx_loc > last_tx_location, + "Transactions were not in chain order:\n\ {tx_loc:?} {tx_id:?} was after:\n\ {last_tx_location:?}", - ); + ); - last_tx_location = *tx_loc; + last_tx_location = *tx_loc; - tx_id.to_string() - }) - .collect() - } - _ => unreachable!("unmatched response to a TransactionsByAddresses request"), - }; + tx_id.to_string() + }) + .collect() + } + _ => unreachable!("unmatched response to a TransactionsByAddresses request"), + }; - Ok(hashes) - } - .boxed() + Ok(hashes) } - fn get_address_utxos( + async fn get_address_utxos( &self, address_strings: AddressStrings, - ) -> BoxFuture>> { + ) -> Result> { let mut state = self.state.clone(); let mut response_utxos = vec![]; - async move { - let valid_addresses = address_strings.valid_addresses()?; - - // get utxos data for addresses - let request = zebra_state::ReadRequest::UtxosByAddresses(valid_addresses); - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_misc_error()?; - let utxos = match response { - zebra_state::ReadResponse::AddressUtxos(utxos) => utxos, - _ => unreachable!("unmatched response to a UtxosByAddresses request"), - }; + let valid_addresses = address_strings.valid_addresses()?; + + // get utxos data for addresses + let request = zebra_state::ReadRequest::UtxosByAddresses(valid_addresses); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_misc_error()?; + let utxos = match response { + zebra_state::ReadResponse::AddressUtxos(utxos) => utxos, + _ => unreachable!("unmatched response to a UtxosByAddresses request"), + }; - let mut last_output_location = OutputLocation::from_usize(Height(0), 0, 0); - - for utxo_data in utxos.utxos() { - let address = utxo_data.0; - let txid = *utxo_data.1; - let height = utxo_data.2.height(); - let output_index = utxo_data.2.output_index(); - let script = utxo_data.3.lock_script.clone(); - let satoshis = u64::from(utxo_data.3.value); - - let output_location = *utxo_data.2; - // Check that the returned UTXOs are in chain order. - assert!( - output_location > last_output_location, - "UTXOs were not in chain order:\n\ + let mut last_output_location = OutputLocation::from_usize(Height(0), 0, 0); + + for utxo_data in utxos.utxos() { + let address = utxo_data.0; + let txid = *utxo_data.1; + let height = utxo_data.2.height(); + let output_index = utxo_data.2.output_index(); + let script = utxo_data.3.lock_script.clone(); + let satoshis = u64::from(utxo_data.3.value); + + let output_location = *utxo_data.2; + // Check that the returned UTXOs are in chain order. 
+ assert!( + output_location > last_output_location, + "UTXOs were not in chain order:\n\ {output_location:?} {address:?} {txid:?} was after:\n\ {last_output_location:?}", - ); - - let entry = GetAddressUtxos { - address, - txid, - output_index, - script, - satoshis, - height, - }; - response_utxos.push(entry); + ); - last_output_location = output_location; - } + let entry = GetAddressUtxos { + address, + txid, + output_index, + script, + satoshis, + height, + }; + response_utxos.push(entry); - Ok(response_utxos) + last_output_location = output_location; } - .boxed() + + Ok(response_utxos) } fn stop(&self) -> Result { @@ -1486,25 +1435,25 @@ where if self.network.is_regtest() { match nix::sys::signal::raise(nix::sys::signal::SIGINT) { Ok(_) => Ok("Zebra server stopping".to_string()), - Err(error) => Err(Error { - code: ErrorCode::InternalError, - message: format!("Failed to shut down: {}", error), - data: None, - }), + Err(error) => Err(ErrorObject::owned( + ErrorCode::InternalError.code(), + format!("Failed to shut down: {}", error).as_str(), + None::<()>, + )), } } else { - Err(Error { - code: ErrorCode::MethodNotFound, - message: "stop is only available on regtest networks".to_string(), - data: None, - }) + Err(ErrorObject::borrowed( + ErrorCode::MethodNotFound.code(), + "stop is only available on regtest networks", + None, + )) } #[cfg(target_os = "windows")] - Err(Error { - code: ErrorCode::MethodNotFound, - message: "stop is not available in windows targets".to_string(), - data: None, - }) + Err(ErrorObject::borrowed( + ErrorCode::MethodNotFound.code(), + "stop is not available in windows targets", + None, + )) } } @@ -1591,8 +1540,8 @@ impl Default for GetBlockChainInfo { /// A wrapper type with a list of transparent address strings. /// -/// This is used for the input parameter of [`Rpc::get_address_balance`], -/// [`Rpc::get_address_tx_ids`] and [`Rpc::get_address_utxos`]. +/// This is used for the input parameter of [`RpcServer::get_address_balance`], +/// [`RpcServer::get_address_tx_ids`] and [`RpcServer::get_address_utxos`]. #[derive(Clone, Debug, Eq, PartialEq, Hash, serde::Deserialize)] pub struct AddressStrings { /// A list of transparent address strings. @@ -1749,7 +1698,7 @@ impl Default for SentTransactionHash { /// Response to a `getblock` RPC request. /// -/// See the notes for the [`Rpc::get_block`] method. +/// See the notes for the [`RpcServer::get_block`] method. #[derive(Clone, Debug, PartialEq, serde::Serialize)] #[serde(untagged)] #[allow(clippy::large_enum_variant)] //TODO: create a struct for the Object and Box it @@ -1881,7 +1830,7 @@ pub enum GetBlockTransaction { /// Response to a `getblockheader` RPC request. /// -/// See the notes for the [`Rpc::get_block_header`] method. +/// See the notes for the [`RpcServer::get_block_header`] method. #[derive(Clone, Debug, PartialEq, serde::Serialize)] #[serde(untagged)] pub enum GetBlockHeader { @@ -1895,7 +1844,7 @@ pub enum GetBlockHeader { #[derive(Clone, Debug, PartialEq, serde::Serialize)] /// Verbose response to a `getblockheader` RPC request. /// -/// See the notes for the [`Rpc::get_block_header`] method. +/// See the notes for the [`RpcServer::get_block_header`] method. pub struct GetBlockHeaderObject { /// The hash of the requested block. pub hash: GetBlockHash, @@ -1984,7 +1933,7 @@ impl Default for GetBlockHeaderObject { /// /// Contains the hex-encoded hash of the requested block. /// -/// Also see the notes for the [`Rpc::get_best_block_hash`] and `get_block_hash` methods. 
+/// Also see the notes for the [`RpcServer::get_best_block_hash`] and `get_block_hash` methods. #[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] #[serde(transparent)] pub struct GetBlockHash(#[serde(with = "hex")] pub block::Hash); @@ -2200,19 +2149,25 @@ impl OrchardTrees { /// Check if provided height range is valid for address indexes. fn check_height_range(start: Height, end: Height, chain_height: Height) -> Result<()> { if start == Height(0) || end == Height(0) { - return Err(Error::invalid_params(format!( - "start {start:?} and end {end:?} must both be greater than zero" - ))); + return Err(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!("start {start:?} and end {end:?} must both be greater than zero"), + None::<()>, + )); } if start > end { - return Err(Error::invalid_params(format!( - "start {start:?} must be less than or equal to end {end:?}" - ))); + return Err(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!("start {start:?} must be less than or equal to end {end:?}"), + None::<()>, + )); } if start > chain_height || end > chain_height { - return Err(Error::invalid_params(format!( - "start {start:?} and end {end:?} must both be less than or equal to the chain tip {chain_height:?}" - ))); + return Err(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!("start {start:?} and end {end:?} must both be less than or equal to the chain tip {chain_height:?}"), + None::<()>, + )); } Ok(()) @@ -2230,8 +2185,10 @@ pub fn height_from_signed_int(index: i32, tip_height: Height) -> Result if index >= 0 { let height = index.try_into().expect("Positive i32 always fits in u32"); if height > tip_height.0 { - return Err(Error::invalid_params( + return Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), "Provided index is greater than the current tip", + None, )); } Ok(Height(height)) @@ -2242,17 +2199,27 @@ pub fn height_from_signed_int(index: i32, tip_height: Height) -> Result .checked_add(index + 1); let sanitized_height = match height { - None => return Err(Error::invalid_params("Provided index is not valid")), + None => { + return Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), + "Provided index is not valid", + None, + )) + } Some(h) => { if h < 0 { - return Err(Error::invalid_params( + return Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), "Provided negative index ends up with a negative height", + None, )); } let h: u32 = h.try_into().expect("Positive i32 always fits in u32"); if h > tip_height.0 { - return Err(Error::invalid_params( + return Err(ErrorObject::borrowed( + ErrorCode::InvalidParams.code(), "Provided index is greater than the current tip", + None, )); } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 42c5d282bed..2bb9a0ca393 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -2,9 +2,10 @@ use std::{fmt::Debug, sync::Arc, time::Duration}; -use futures::{future::OptionFuture, FutureExt, TryFutureExt}; -use jsonrpc_core::{self, BoxFuture, Error, ErrorCode, Result}; -use jsonrpc_derive::rpc; +use futures::{future::OptionFuture, TryFutureExt}; +use jsonrpsee::core::{async_trait, RpcResult as Result}; +use jsonrpsee_proc_macros::rpc; +use jsonrpsee_types::ErrorObject; use tower::{Service, ServiceExt}; use zcash_address::{unified::Encoding, TryFromAddress}; @@ -83,7 +84,7 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// This 
rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. - #[rpc(name = "getblockcount")] + #[method(name = "getblockcount")] fn get_block_count(&self) -> Result; /// Returns the hash of the block of a given height iff the index argument correspond @@ -102,8 +103,8 @@ pub trait GetBlockTemplateRpc { /// - If `index` is positive then index = block height. /// - If `index` is negative then -1 is the last known valid block. /// - This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. - #[rpc(name = "getblockhash")] - fn get_block_hash(&self, index: i32) -> BoxFuture>; + #[method(name = "getblockhash")] + async fn get_block_hash(&self, index: i32) -> Result; /// Returns a block template for mining new Zcash blocks. /// @@ -128,11 +129,11 @@ pub trait GetBlockTemplateRpc { /// so moving between chains and forking chains is very cheap. /// /// This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. - #[rpc(name = "getblocktemplate")] - fn get_block_template( + #[method(name = "getblocktemplate")] + async fn get_block_template( &self, parameters: Option, - ) -> BoxFuture>; + ) -> Result; /// Submits block to the node to be validated and committed. /// Returns the [`submit_block::Response`] for the operation, as a JSON string. @@ -149,20 +150,20 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// - `jsonparametersobject` holds a single field, workid, that must be included in submissions if provided by the server. - #[rpc(name = "submitblock")] - fn submit_block( + #[method(name = "submitblock")] + async fn submit_block( &self, hex_data: HexData, _parameters: Option, - ) -> BoxFuture>; + ) -> Result; /// Returns mining-related information. /// /// zcashd reference: [`getmininginfo`](https://zcash.github.io/rpc/getmininginfo.html) /// method: post /// tags: mining - #[rpc(name = "getmininginfo")] - fn get_mining_info(&self) -> BoxFuture>; + #[method(name = "getmininginfo")] + async fn get_mining_info(&self) -> Result; /// Returns the estimated network solutions per second based on the last `num_blocks` before /// `height`. @@ -174,12 +175,9 @@ pub trait GetBlockTemplateRpc { /// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html) /// method: post /// tags: mining - #[rpc(name = "getnetworksolps")] - fn get_network_sol_ps( - &self, - num_blocks: Option, - height: Option, - ) -> BoxFuture>; + #[method(name = "getnetworksolps")] + async fn get_network_sol_ps(&self, num_blocks: Option, height: Option) + -> Result; /// Returns the estimated network solutions per second based on the last `num_blocks` before /// `height`. @@ -190,13 +188,13 @@ pub trait GetBlockTemplateRpc { /// zcashd reference: [`getnetworkhashps`](https://zcash.github.io/rpc/getnetworkhashps.html) /// method: post /// tags: mining - #[rpc(name = "getnetworkhashps")] - fn get_network_hash_ps( + #[method(name = "getnetworkhashps")] + async fn get_network_hash_ps( &self, num_blocks: Option, height: Option, - ) -> BoxFuture> { - self.get_network_sol_ps(num_blocks, height) + ) -> Result { + self.get_network_sol_ps(num_blocks, height).await } /// Returns data about each connected network node. 
@@ -204,8 +202,8 @@ pub trait GetBlockTemplateRpc { /// zcashd reference: [`getpeerinfo`](https://zcash.github.io/rpc/getpeerinfo.html) /// method: post /// tags: network - #[rpc(name = "getpeerinfo")] - fn get_peer_info(&self) -> BoxFuture>>; + #[method(name = "getpeerinfo")] + async fn get_peer_info(&self) -> Result>; /// Checks if a zcash address is valid. /// Returns information about the given address if valid. @@ -221,8 +219,8 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// - No notes - #[rpc(name = "validateaddress")] - fn validate_address(&self, address: String) -> BoxFuture>; + #[method(name = "validateaddress")] + async fn validate_address(&self, address: String) -> Result; /// Checks if a zcash address is valid. /// Returns information about the given address if valid. @@ -238,11 +236,11 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// - No notes - #[rpc(name = "z_validateaddress")] - fn z_validate_address( + #[method(name = "z_validateaddress")] + async fn z_validate_address( &self, address: String, - ) -> BoxFuture>; + ) -> Result; /// Returns the block subsidy reward of the block at `height`, taking into account the mining slow start. /// Returns an error if `height` is less than the height of the first halving for the current network. @@ -258,16 +256,16 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// If `height` is not supplied, uses the tip height. - #[rpc(name = "getblocksubsidy")] - fn get_block_subsidy(&self, height: Option) -> BoxFuture>; + #[method(name = "getblocksubsidy")] + async fn get_block_subsidy(&self, height: Option) -> Result; /// Returns the proof-of-work difficulty as a multiple of the minimum difficulty. /// /// zcashd reference: [`getdifficulty`](https://zcash.github.io/rpc/getdifficulty.html) /// method: post /// tags: blockchain - #[rpc(name = "getdifficulty")] - fn get_difficulty(&self) -> BoxFuture>; + #[method(name = "getdifficulty")] + async fn get_difficulty(&self) -> Result; /// Returns the list of individual payment addresses given a unified address. /// @@ -282,13 +280,10 @@ pub trait GetBlockTemplateRpc { /// # Notes /// /// - No notes - #[rpc(name = "z_listunifiedreceivers")] - fn z_list_unified_receivers( - &self, - address: String, - ) -> BoxFuture>; + #[method(name = "z_listunifiedreceivers")] + async fn z_list_unified_receivers(&self, address: String) -> Result; - #[rpc(name = "generate")] + #[method(name = "generate")] /// Mine blocks immediately. Returns the block hashes of the generated blocks. /// /// # Parameters @@ -302,7 +297,7 @@ pub trait GetBlockTemplateRpc { /// zcashd reference: [`generate`](https://zcash.github.io/rpc/generate.html) /// method: post /// tags: generating - fn generate(&self, num_blocks: u32) -> BoxFuture>>; + async fn generate(&self, num_blocks: u32) -> Result>; } /// RPC method implementations. @@ -536,7 +531,8 @@ where } } -impl GetBlockTemplateRpc +#[async_trait] +impl GetBlockTemplateRpcServer for GetBlockTemplateRpcImpl where Mempool: Service< @@ -571,40 +567,37 @@ where best_chain_tip_height(&self.latest_chain_tip).map(|height| height.0) } - fn get_block_hash(&self, index: i32) -> BoxFuture> { + async fn get_block_hash(&self, index: i32) -> Result { let mut state = self.state.clone(); let latest_chain_tip = self.latest_chain_tip.clone(); - async move { - // TODO: look up this height as part of the state request? 
- let tip_height = best_chain_tip_height(&latest_chain_tip)?; - - let height = height_from_signed_int(index, tip_height)?; - - let request = zebra_state::ReadRequest::BestChainBlockHash(height); - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_error(server::error::LegacyCode::default())?; - - match response { - zebra_state::ReadResponse::BlockHash(Some(hash)) => Ok(GetBlockHash(hash)), - zebra_state::ReadResponse::BlockHash(None) => Err(Error { - code: server::error::LegacyCode::InvalidParameter.into(), - message: "Block not found".to_string(), - data: None, - }), - _ => unreachable!("unmatched response to a block request"), - } + // TODO: look up this height as part of the state request? + let tip_height = best_chain_tip_height(&latest_chain_tip)?; + + let height = height_from_signed_int(index, tip_height)?; + + let request = zebra_state::ReadRequest::BestChainBlockHash(height); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_error(server::error::LegacyCode::default())?; + + match response { + zebra_state::ReadResponse::BlockHash(Some(hash)) => Ok(GetBlockHash(hash)), + zebra_state::ReadResponse::BlockHash(None) => Err(ErrorObject::borrowed( + server::error::LegacyCode::InvalidParameter.into(), + "Block not found", + None, + )), + _ => unreachable!("unmatched response to a block request"), } - .boxed() } - fn get_block_template( + async fn get_block_template( &self, parameters: Option, - ) -> BoxFuture> { + ) -> Result { // Clone Configs let network = self.network.clone(); let miner_address = self.miner_address.clone(); @@ -628,399 +621,392 @@ where latest_chain_tip, sync_status, ) - .boxed(); + .await; } // To implement long polling correctly, we split this RPC into multiple phases. - async move { - get_block_template::check_parameters(¶meters)?; - - let client_long_poll_id = parameters.as_ref().and_then(|params| params.long_poll_id); - - // - One-off checks - - // Check config and parameters. - // These checks always have the same result during long polling. - let miner_address = check_miner_address(miner_address)?; + get_block_template::check_parameters(¶meters)?; + + let client_long_poll_id = parameters.as_ref().and_then(|params| params.long_poll_id); + + // - One-off checks + + // Check config and parameters. + // These checks always have the same result during long polling. + let miner_address = check_miner_address(miner_address)?; + + // - Checks and fetches that can change during long polling + // + // Set up the loop. + let mut max_time_reached = false; + + // The loop returns the server long poll ID, + // which should be different to the client long poll ID. + let ( + server_long_poll_id, + chain_tip_and_local_time, + mempool_txs, + mempool_tx_deps, + submit_old, + ) = loop { + // Check if we are synced to the tip. + // The result of this check can change during long polling. + // + // Optional TODO: + // - add `async changed()` method to ChainSyncStatus (like `ChainTip`) + check_synced_to_tip(&network, latest_chain_tip.clone(), sync_status.clone())?; + // TODO: return an error if we have no peers, like `zcashd` does, + // and add a developer config that mines regardless of how many peers we have. + // https://github.com/zcash/zcash/blob/6fdd9f1b81d3b228326c9826fa10696fc516444b/src/miner.cpp#L865-L880 + + // We're just about to fetch state data, then maybe wait for any changes. + // Mark all the changes before the fetch as seen. 
+ // Changes are also ignored in any clones made after the mark. + latest_chain_tip.mark_best_tip_seen(); + + // Fetch the state data and local time for the block template: + // - if the tip block hash changes, we must return from long polling, + // - if the local clock changes on testnet, we might return from long polling + // + // We always return after 90 minutes on mainnet, even if we have the same response, + // because the max time has been reached. + let chain_tip_and_local_time @ zebra_state::GetBlockTemplateChainInfo { + tip_hash, + tip_height, + max_time, + cur_time, + .. + } = fetch_state_tip_and_local_time(state.clone()).await?; - // - Checks and fetches that can change during long polling + // Fetch the mempool data for the block template: + // - if the mempool transactions change, we might return from long polling. // - // Set up the loop. - let mut max_time_reached = false; - - // The loop returns the server long poll ID, - // which should be different to the client long poll ID. - let ( - server_long_poll_id, - chain_tip_and_local_time, - mempool_txs, - mempool_tx_deps, - submit_old, - ) = loop { - // Check if we are synced to the tip. - // The result of this check can change during long polling. - // - // Optional TODO: - // - add `async changed()` method to ChainSyncStatus (like `ChainTip`) - check_synced_to_tip(&network, latest_chain_tip.clone(), sync_status.clone())?; - // TODO: return an error if we have no peers, like `zcashd` does, - // and add a developer config that mines regardless of how many peers we have. - // https://github.com/zcash/zcash/blob/6fdd9f1b81d3b228326c9826fa10696fc516444b/src/miner.cpp#L865-L880 - - // We're just about to fetch state data, then maybe wait for any changes. - // Mark all the changes before the fetch as seen. - // Changes are also ignored in any clones made after the mark. - latest_chain_tip.mark_best_tip_seen(); - - // Fetch the state data and local time for the block template: - // - if the tip block hash changes, we must return from long polling, - // - if the local clock changes on testnet, we might return from long polling - // - // We always return after 90 minutes on mainnet, even if we have the same response, - // because the max time has been reached. - let chain_tip_and_local_time @ zebra_state::GetBlockTemplateChainInfo { - tip_hash, - tip_height, - max_time, - cur_time, - .. - } = fetch_state_tip_and_local_time(state.clone()).await?; - - // Fetch the mempool data for the block template: - // - if the mempool transactions change, we might return from long polling. - // - // If the chain fork has just changed, miners want to get the new block as fast - // as possible, rather than wait for transactions to re-verify. This increases - // miner profits (and any delays can cause chain forks). So we don't wait between - // the chain tip changing and getting mempool transactions. - // - // Optional TODO: - // - add a `MempoolChange` type with an `async changed()` method (like `ChainTip`) - let Some((mempool_txs, mempool_tx_deps)) = - fetch_mempool_transactions(mempool.clone(), tip_hash) - .await? - // If the mempool and state responses are out of sync: - // - if we are not long polling, omit mempool transactions from the template, - // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. 
- .or_else(|| client_long_poll_id.is_none().then(Default::default)) - else { - continue; - }; - - // - Long poll ID calculation - let server_long_poll_id = LongPollInput::new( - tip_height, - tip_hash, - max_time, - mempool_txs.iter().map(|tx| tx.transaction.id), - ) - .generate_id(); - - // The loop finishes if: - // - the client didn't pass a long poll ID, - // - the server long poll ID is different to the client long poll ID, or - // - the previous loop iteration waited until the max time. - if Some(&server_long_poll_id) != client_long_poll_id.as_ref() || max_time_reached { - let mut submit_old = client_long_poll_id - .as_ref() - .map(|old_long_poll_id| server_long_poll_id.submit_old(old_long_poll_id)); - - // On testnet, the max time changes the block difficulty, so old shares are - // invalid. On mainnet, this means there has been 90 minutes without a new - // block or mempool transaction, which is very unlikely. So the miner should - // probably reset anyway. - if max_time_reached { - submit_old = Some(false); - } + // If the chain fork has just changed, miners want to get the new block as fast + // as possible, rather than wait for transactions to re-verify. This increases + // miner profits (and any delays can cause chain forks). So we don't wait between + // the chain tip changing and getting mempool transactions. + // + // Optional TODO: + // - add a `MempoolChange` type with an `async changed()` method (like `ChainTip`) + let Some((mempool_txs, mempool_tx_deps)) = + fetch_mempool_transactions(mempool.clone(), tip_hash) + .await? + // If the mempool and state responses are out of sync: + // - if we are not long polling, omit mempool transactions from the template, + // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. + .or_else(|| client_long_poll_id.is_none().then(Default::default)) + else { + continue; + }; - break ( - server_long_poll_id, - chain_tip_and_local_time, - mempool_txs, - mempool_tx_deps, - submit_old, - ); + // - Long poll ID calculation + let server_long_poll_id = LongPollInput::new( + tip_height, + tip_hash, + max_time, + mempool_txs.iter().map(|tx| tx.transaction.id), + ) + .generate_id(); + + // The loop finishes if: + // - the client didn't pass a long poll ID, + // - the server long poll ID is different to the client long poll ID, or + // - the previous loop iteration waited until the max time. + if Some(&server_long_poll_id) != client_long_poll_id.as_ref() || max_time_reached { + let mut submit_old = client_long_poll_id + .as_ref() + .map(|old_long_poll_id| server_long_poll_id.submit_old(old_long_poll_id)); + + // On testnet, the max time changes the block difficulty, so old shares are + // invalid. On mainnet, this means there has been 90 minutes without a new + // block or mempool transaction, which is very unlikely. So the miner should + // probably reset anyway. + if max_time_reached { + submit_old = Some(false); } - // - Polling wait conditions - // - // TODO: when we're happy with this code, split it into a function. - // - // Periodically check the mempool for changes. - // - // Optional TODO: - // Remove this polling wait if we switch to using futures to detect sync status - // and mempool changes. 
- let wait_for_mempool_request = tokio::time::sleep(Duration::from_secs( - GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - )); + break ( + server_long_poll_id, + chain_tip_and_local_time, + mempool_txs, + mempool_tx_deps, + submit_old, + ); + } - // Return immediately if the chain tip has changed. - // The clone preserves the seen status of the chain tip. - let mut wait_for_best_tip_change = latest_chain_tip.clone(); - let wait_for_best_tip_change = wait_for_best_tip_change.best_tip_changed(); - - // Wait for the maximum block time to elapse. This can change the block header - // on testnet. (On mainnet it can happen due to a network disconnection, or a - // rapid drop in hash rate.) - // - // This duration might be slightly lower than the actual maximum, - // if cur_time was clamped to min_time. In that case the wait is very long, - // and it's ok to return early. - // - // It can also be zero if cur_time was clamped to max_time. In that case, - // we want to wait for another change, and ignore this timeout. So we use an - // `OptionFuture::None`. - let duration_until_max_time = max_time.saturating_duration_since(cur_time); - let wait_for_max_time: OptionFuture<_> = if duration_until_max_time.seconds() > 0 { - Some(tokio::time::sleep(duration_until_max_time.to_std())) - } else { - None + // - Polling wait conditions + // + // TODO: when we're happy with this code, split it into a function. + // + // Periodically check the mempool for changes. + // + // Optional TODO: + // Remove this polling wait if we switch to using futures to detect sync status + // and mempool changes. + let wait_for_mempool_request = tokio::time::sleep(Duration::from_secs( + GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, + )); + + // Return immediately if the chain tip has changed. + // The clone preserves the seen status of the chain tip. + let mut wait_for_best_tip_change = latest_chain_tip.clone(); + let wait_for_best_tip_change = wait_for_best_tip_change.best_tip_changed(); + + // Wait for the maximum block time to elapse. This can change the block header + // on testnet. (On mainnet it can happen due to a network disconnection, or a + // rapid drop in hash rate.) + // + // This duration might be slightly lower than the actual maximum, + // if cur_time was clamped to min_time. In that case the wait is very long, + // and it's ok to return early. + // + // It can also be zero if cur_time was clamped to max_time. In that case, + // we want to wait for another change, and ignore this timeout. So we use an + // `OptionFuture::None`. + let duration_until_max_time = max_time.saturating_duration_since(cur_time); + let wait_for_max_time: OptionFuture<_> = if duration_until_max_time.seconds() > 0 { + Some(tokio::time::sleep(duration_until_max_time.to_std())) + } else { + None + } + .into(); + + // Optional TODO: + // `zcashd` generates the next coinbase transaction while waiting for changes. + // When Zebra supports shielded coinbase, we might want to do this in parallel. + // But the coinbase value depends on the selected transactions, so this needs + // further analysis to check if it actually saves us any time. + + tokio::select! { + // Poll the futures in the listed order, for efficiency. + // We put the most frequent conditions first. 
+ biased; + + // This timer elapses every few seconds + _elapsed = wait_for_mempool_request => { + tracing::debug!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, + "checking for a new mempool change after waiting a few seconds" + ); } - .into(); - - // Optional TODO: - // `zcashd` generates the next coinbase transaction while waiting for changes. - // When Zebra supports shielded coinbase, we might want to do this in parallel. - // But the coinbase value depends on the selected transactions, so this needs - // further analysis to check if it actually saves us any time. - - tokio::select! { - // Poll the futures in the listed order, for efficiency. - // We put the most frequent conditions first. - biased; - - // This timer elapses every few seconds - _elapsed = wait_for_mempool_request => { - tracing::debug!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - "checking for a new mempool change after waiting a few seconds" - ); - } - - // The state changes after around a target block interval (75s) - tip_changed_result = wait_for_best_tip_change => { - match tip_changed_result { - Ok(()) => { - // Spurious updates shouldn't happen in the state, because the - // difficulty and hash ordering is a stable total order. But - // since they could cause a busy-loop, guard against them here. - latest_chain_tip.mark_best_tip_seen(); - - let new_tip_hash = latest_chain_tip.best_tip_hash(); - if new_tip_hash == Some(tip_hash) { - tracing::debug!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - ?tip_hash, - ?tip_height, - "ignoring spurious state change notification" - ); - - // Wait for the mempool interval, then check for any changes. - tokio::time::sleep(Duration::from_secs( - GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, - )).await; - - continue; - } + // The state changes after around a target block interval (75s) + tip_changed_result = wait_for_best_tip_change => { + match tip_changed_result { + Ok(()) => { + // Spurious updates shouldn't happen in the state, because the + // difficulty and hash ordering is a stable total order. But + // since they could cause a busy-loop, guard against them here. + latest_chain_tip.mark_best_tip_seen(); + + let new_tip_hash = latest_chain_tip.best_tip_hash(); + if new_tip_hash == Some(tip_hash) { tracing::debug!( ?max_time, ?cur_time, ?server_long_poll_id, ?client_long_poll_id, - "returning from long poll because state has changed" + ?tip_hash, + ?tip_height, + "ignoring spurious state change notification" ); - } - Err(recv_error) => { - // This log is rare and helps with debugging, so it's ok to be info. - tracing::info!( - ?recv_error, - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - "returning from long poll due to a state error.\ - Is Zebra shutting down?" - ); + // Wait for the mempool interval, then check for any changes. + tokio::time::sleep(Duration::from_secs( + GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, + )).await; - return Err(recv_error).map_error(server::error::LegacyCode::default()); + continue; } + + tracing::debug!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + "returning from long poll because state has changed" + ); } - } - // The max time does not elapse during normal operation on mainnet, - // and it rarely elapses on testnet. 
- Some(_elapsed) = wait_for_max_time => { - // This log is very rare so it's ok to be info. - tracing::info!( - ?max_time, - ?cur_time, - ?server_long_poll_id, - ?client_long_poll_id, - "returning from long poll because max time was reached" - ); - - max_time_reached = true; + Err(recv_error) => { + // This log is rare and helps with debugging, so it's ok to be info. + tracing::info!( + ?recv_error, + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + "returning from long poll due to a state error.\ + Is Zebra shutting down?" + ); + + return Err(recv_error).map_error(server::error::LegacyCode::default()); + } } } - }; - // - Processing fetched data to create a transaction template - // - // Apart from random weighted transaction selection, - // the template only depends on the previously fetched data. - // This processing never fails. - - // Calculate the next block height. - let next_block_height = - (chain_tip_and_local_time.tip_height + 1).expect("tip is far below Height::MAX"); - - tracing::debug!( - mempool_tx_hashes = ?mempool_txs - .iter() - .map(|tx| tx.transaction.id.mined_id()) - .collect::>(), - "selecting transactions for the template from the mempool" - ); + // The max time does not elapse during normal operation on mainnet, + // and it rarely elapses on testnet. + Some(_elapsed) = wait_for_max_time => { + // This log is very rare so it's ok to be info. + tracing::info!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + "returning from long poll because max time was reached" + ); - // Randomly select some mempool transactions. - let mempool_txs = zip317::select_mempool_transactions( - &network, - next_block_height, - &miner_address, - mempool_txs, - mempool_tx_deps, - debug_like_zcashd, - extra_coinbase_data.clone(), - ); + max_time_reached = true; + } + } + }; + + // - Processing fetched data to create a transaction template + // + // Apart from random weighted transaction selection, + // the template only depends on the previously fetched data. + // This processing never fails. + + // Calculate the next block height. + let next_block_height = + (chain_tip_and_local_time.tip_height + 1).expect("tip is far below Height::MAX"); + + tracing::debug!( + mempool_tx_hashes = ?mempool_txs + .iter() + .map(|tx| tx.transaction.id.mined_id()) + .collect::>(), + "selecting transactions for the template from the mempool" + ); - tracing::debug!( - selected_mempool_tx_hashes = ?mempool_txs - .iter() - .map(|#[cfg(not(test))] tx, #[cfg(test)] (_, tx)| tx.transaction.id.mined_id()) - .collect::>(), - "selected transactions for the template from the mempool" - ); + // Randomly select some mempool transactions. + let mempool_txs = zip317::select_mempool_transactions( + &network, + next_block_height, + &miner_address, + mempool_txs, + mempool_tx_deps, + debug_like_zcashd, + extra_coinbase_data.clone(), + ); - // - After this point, the template only depends on the previously fetched data. - - let response = GetBlockTemplate::new( - &network, - &miner_address, - &chain_tip_and_local_time, - server_long_poll_id, - mempool_txs, - submit_old, - debug_like_zcashd, - extra_coinbase_data, - ); + tracing::debug!( + selected_mempool_tx_hashes = ?mempool_txs + .iter() + .map(|#[cfg(not(test))] tx, #[cfg(test)] (_, tx)| tx.transaction.id.mined_id()) + .collect::>(), + "selected transactions for the template from the mempool" + ); - Ok(response.into()) - } - .boxed() + // - After this point, the template only depends on the previously fetched data. 
+ + let response = GetBlockTemplate::new( + &network, + &miner_address, + &chain_tip_and_local_time, + server_long_poll_id, + mempool_txs, + submit_old, + debug_like_zcashd, + extra_coinbase_data, + ); + + Ok(response.into()) } - fn submit_block( + async fn submit_block( &self, HexData(block_bytes): HexData, _parameters: Option, - ) -> BoxFuture> { + ) -> Result { let mut block_verifier_router = self.block_verifier_router.clone(); - async move { - let block: Block = match block_bytes.zcash_deserialize_into() { - Ok(block_bytes) => block_bytes, - Err(error) => { - tracing::info!(?error, "submit block failed: block bytes could not be deserialized into a structurally valid block"); - - return Ok(submit_block::ErrorResponse::Rejected.into()); - } - }; + let block: Block = match block_bytes.zcash_deserialize_into() { + Ok(block_bytes) => block_bytes, + Err(error) => { + tracing::info!(?error, "submit block failed: block bytes could not be deserialized into a structurally valid block"); - let block_height = block - .coinbase_height() - .map(|height| height.0.to_string()) - .unwrap_or_else(|| "invalid coinbase height".to_string()); - let block_hash = block.hash(); + return Ok(submit_block::ErrorResponse::Rejected.into()); + } + }; + + let block_height = block + .coinbase_height() + .map(|height| height.0.to_string()) + .unwrap_or_else(|| "invalid coinbase height".to_string()); + let block_hash = block.hash(); + + let block_verifier_router_response = block_verifier_router + .ready() + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))? + .call(zebra_consensus::Request::Commit(Arc::new(block))) + .await; + + let chain_error = match block_verifier_router_response { + // Currently, this match arm returns `null` (Accepted) for blocks committed + // to any chain, but Accepted is only for blocks in the best chain. + // + // TODO (#5487): + // - Inconclusive: check if the block is on a side-chain + // The difference is important to miners, because they want to mine on the best chain. + Ok(block_hash) => { + tracing::info!(?block_hash, ?block_height, "submit block accepted"); + return Ok(submit_block::Response::Accepted); + } - let block_verifier_router_response = block_verifier_router - .ready() - .await - .map_err(|error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - })? - .call(zebra_consensus::Request::Commit(Arc::new(block))) - .await; - - let chain_error = match block_verifier_router_response { - // Currently, this match arm returns `null` (Accepted) for blocks committed - // to any chain, but Accepted is only for blocks in the best chain. - // - // TODO (#5487): - // - Inconclusive: check if the block is on a side-chain - // The difference is important to miners, because they want to mine on the best chain. - Ok(block_hash) => { - tracing::info!(?block_hash, ?block_height, "submit block accepted"); - return Ok(submit_block::Response::Accepted); - } + // Turns BoxError into Result, + // by downcasting from Any to VerifyChainError. + Err(box_error) => { + let error = box_error + .downcast::() + .map(|boxed_chain_error| *boxed_chain_error); - // Turns BoxError into Result, - // by downcasting from Any to VerifyChainError. 
- Err(box_error) => { - let error = box_error - .downcast::() - .map(|boxed_chain_error| *boxed_chain_error); + tracing::info!( + ?error, + ?block_hash, + ?block_height, + "submit block failed verification" + ); - tracing::info!(?error, ?block_hash, ?block_height, "submit block failed verification"); + error + } + }; - error - } - }; + let response = match chain_error { + Ok(source) if source.is_duplicate_request() => submit_block::ErrorResponse::Duplicate, - let response = match chain_error { - Ok(source) if source.is_duplicate_request() => { - submit_block::ErrorResponse::Duplicate - } + // Currently, these match arms return Reject for the older duplicate in a queue, + // but queued duplicates should be DuplicateInconclusive. + // + // Optional TODO (#5487): + // - DuplicateInconclusive: turn these non-finalized state duplicate block errors + // into BlockError enum variants, and handle them as DuplicateInconclusive: + // - "block already sent to be committed to the state" + // - "replaced by newer request" + // - keep the older request in the queue, + // and return a duplicate error for the newer request immediately. + // This improves the speed of the RPC response. + // + // Checking the download queues and BlockVerifierRouter buffer for duplicates + // might require architectural changes to Zebra, so we should only do it + // if mining pools really need it. + Ok(_verify_chain_error) => submit_block::ErrorResponse::Rejected, - // Currently, these match arms return Reject for the older duplicate in a queue, - // but queued duplicates should be DuplicateInconclusive. - // - // Optional TODO (#5487): - // - DuplicateInconclusive: turn these non-finalized state duplicate block errors - // into BlockError enum variants, and handle them as DuplicateInconclusive: - // - "block already sent to be committed to the state" - // - "replaced by newer request" - // - keep the older request in the queue, - // and return a duplicate error for the newer request immediately. - // This improves the speed of the RPC response. - // - // Checking the download queues and BlockVerifierRouter buffer for duplicates - // might require architectural changes to Zebra, so we should only do it - // if mining pools really need it. - Ok(_verify_chain_error) => submit_block::ErrorResponse::Rejected, - - // This match arm is currently unreachable, but if future changes add extra error types, - // we want to turn them into `Rejected`. - Err(_unknown_error_type) => submit_block::ErrorResponse::Rejected, - }; + // This match arm is currently unreachable, but if future changes add extra error types, + // we want to turn them into `Rejected`. + Err(_unknown_error_type) => submit_block::ErrorResponse::Rejected, + }; - Ok(response.into()) - } - .boxed() + Ok(response.into()) } - fn get_mining_info(&self) -> BoxFuture> { + async fn get_mining_info(&self) -> Result { let network = self.network.clone(); let mut state = self.state.clone(); @@ -1035,38 +1021,35 @@ where } let solution_rate_fut = self.get_network_sol_ps(None, None); - async move { - // Get the current block size. 
- let mut current_block_size = None; - if tip_height > 0 { - let request = zebra_state::ReadRequest::TipBlockSize; - let response: zebra_state::ReadResponse = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_error(server::error::LegacyCode::default())?; - current_block_size = match response { - zebra_state::ReadResponse::TipBlockSize(Some(block_size)) => Some(block_size), - _ => None, - }; - } - - Ok(get_mining_info::Response::new( - tip_height, - current_block_size, - current_block_tx, - network, - solution_rate_fut.await?, - )) + // Get the current block size. + let mut current_block_size = None; + if tip_height > 0 { + let request = zebra_state::ReadRequest::TipBlockSize; + let response: zebra_state::ReadResponse = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_error(server::error::LegacyCode::default())?; + current_block_size = match response { + zebra_state::ReadResponse::TipBlockSize(Some(block_size)) => Some(block_size), + _ => None, + }; } - .boxed() + + Ok(get_mining_info::Response::new( + tip_height, + current_block_size, + current_block_tx, + network, + solution_rate_fut.await?, + )) } - fn get_network_sol_ps( + async fn get_network_sol_ps( &self, num_blocks: Option, height: Option, - ) -> BoxFuture> { + ) -> Result { // Default number of blocks is 120 if not supplied. let mut num_blocks = num_blocks.unwrap_or(DEFAULT_SOLUTION_RATE_WINDOW_SIZE); // But if it is 0 or negative, it uses the proof of work averaging window. @@ -1082,346 +1065,296 @@ where let mut state = self.state.clone(); - async move { - let request = ReadRequest::SolutionRate { num_blocks, height }; + let request = ReadRequest::SolutionRate { num_blocks, height }; - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_err(|error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - })?; + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; - let solution_rate = match response { - // zcashd returns a 0 rate when the calculation is invalid - ReadResponse::SolutionRate(solution_rate) => solution_rate.unwrap_or(0), + let solution_rate = match response { + // zcashd returns a 0 rate when the calculation is invalid + ReadResponse::SolutionRate(solution_rate) => solution_rate.unwrap_or(0), - _ => unreachable!("unmatched response to a solution rate request"), - }; + _ => unreachable!("unmatched response to a solution rate request"), + }; - Ok(solution_rate - .try_into() - .expect("per-second solution rate always fits in u64")) - } - .boxed() + Ok(solution_rate + .try_into() + .expect("per-second solution rate always fits in u64")) } - fn get_peer_info(&self) -> BoxFuture>> { + async fn get_peer_info(&self) -> Result> { let address_book = self.address_book.clone(); - async move { - Ok(address_book - .recently_live_peers(chrono::Utc::now()) - .into_iter() - .map(PeerInfo::from) - .collect()) - } - .boxed() + Ok(address_book + .recently_live_peers(chrono::Utc::now()) + .into_iter() + .map(PeerInfo::from) + .collect()) } - fn validate_address( - &self, - raw_address: String, - ) -> BoxFuture> { + async fn validate_address(&self, raw_address: String) -> Result { let network = self.network.clone(); - async move { - let Ok(address) = raw_address - .parse::() else { - return Ok(validate_address::Response::invalid()); - }; - - let address = match address - .convert::() { - Ok(address) => 
address, - Err(err) => { - tracing::debug!(?err, "conversion error"); - return Ok(validate_address::Response::invalid()); - } - }; + let Ok(address) = raw_address.parse::() else { + return Ok(validate_address::Response::invalid()); + }; - // we want to match zcashd's behaviour - if !address.is_transparent() { + let address = match address.convert::() { + Ok(address) => address, + Err(err) => { + tracing::debug!(?err, "conversion error"); return Ok(validate_address::Response::invalid()); } + }; - if address.network() == network.kind() { - Ok(validate_address::Response { - address: Some(raw_address), - is_valid: true, - is_script: Some(address.is_script_hash()), - }) - } else { - tracing::info!( - ?network, - address_network = ?address.network(), - "invalid address in validateaddress RPC: Zebra's configured network must match address network" - ); + // we want to match zcashd's behaviour + if !address.is_transparent() { + return Ok(validate_address::Response::invalid()); + } - Ok(validate_address::Response::invalid()) - } + if address.network() == network.kind() { + Ok(validate_address::Response { + address: Some(raw_address), + is_valid: true, + is_script: Some(address.is_script_hash()), + }) + } else { + tracing::info!( + ?network, + address_network = ?address.network(), + "invalid address in validateaddress RPC: Zebra's configured network must match address network" + ); + + Ok(validate_address::Response::invalid()) } - .boxed() } - fn z_validate_address( + async fn z_validate_address( &self, raw_address: String, - ) -> BoxFuture> { + ) -> Result { let network = self.network.clone(); - async move { - let Ok(address) = raw_address - .parse::() else { - return Ok(z_validate_address::Response::invalid()); - }; - - let address = match address - .convert::() { - Ok(address) => address, - Err(err) => { - tracing::debug!(?err, "conversion error"); - return Ok(z_validate_address::Response::invalid()); - } - }; - - if address.network() == network.kind() { - Ok(z_validate_address::Response { - is_valid: true, - address: Some(raw_address), - address_type: Some(z_validate_address::AddressType::from(&address)), - is_mine: Some(false), - }) - } else { - tracing::info!( - ?network, - address_network = ?address.network(), - "invalid address network in z_validateaddress RPC: address is for {:?} but Zebra is on {:?}", - address.network(), - network - ); + let Ok(address) = raw_address.parse::() else { + return Ok(z_validate_address::Response::invalid()); + }; - Ok(z_validate_address::Response::invalid()) + let address = match address.convert::() { + Ok(address) => address, + Err(err) => { + tracing::debug!(?err, "conversion error"); + return Ok(z_validate_address::Response::invalid()); } + }; + + if address.network() == network.kind() { + Ok(z_validate_address::Response { + is_valid: true, + address: Some(raw_address), + address_type: Some(z_validate_address::AddressType::from(&address)), + is_mine: Some(false), + }) + } else { + tracing::info!( + ?network, + address_network = ?address.network(), + "invalid address network in z_validateaddress RPC: address is for {:?} but Zebra is on {:?}", + address.network(), + network + ); + + Ok(z_validate_address::Response::invalid()) } - .boxed() } - fn get_block_subsidy(&self, height: Option) -> BoxFuture> { + async fn get_block_subsidy(&self, height: Option) -> Result { let latest_chain_tip = self.latest_chain_tip.clone(); let network = self.network.clone(); - async move { - let height = if let Some(height) = height { - Height(height) - } else { - 
best_chain_tip_height(&latest_chain_tip)? - }; + let height = if let Some(height) = height { + Height(height) + } else { + best_chain_tip_height(&latest_chain_tip)? + }; + + if height < network.height_for_first_halving() { + return Err(ErrorObject::borrowed( + 0, + "Zebra does not support founders' reward subsidies, \ + use a block height that is after the first halving", + None, + )); + } - if height < network.height_for_first_halving() { - return Err(Error { - code: ErrorCode::ServerError(0), - message: "Zebra does not support founders' reward subsidies, \ - use a block height that is after the first halving" - .into(), - data: None, - }); - } + // Always zero for post-halving blocks + let founders = Amount::zero(); - // Always zero for post-halving blocks - let founders = Amount::zero(); + let total_block_subsidy = + block_subsidy(height, &network).map_error(server::error::LegacyCode::default())?; + let miner_subsidy = miner_subsidy(height, &network, total_block_subsidy) + .map_error(server::error::LegacyCode::default())?; - let total_block_subsidy = - block_subsidy(height, &network).map_error(server::error::LegacyCode::default())?; - let miner_subsidy = miner_subsidy(height, &network, total_block_subsidy) - .map_error(server::error::LegacyCode::default())?; + let (lockbox_streams, mut funding_streams): (Vec<_>, Vec<_>) = + funding_stream_values(height, &network, total_block_subsidy) + .map_error(server::error::LegacyCode::default())? + .into_iter() + // Separate the funding streams into deferred and non-deferred streams + .partition(|(receiver, _)| matches!(receiver, FundingStreamReceiver::Deferred)); + + let is_nu6 = NetworkUpgrade::current(&network, height) == NetworkUpgrade::Nu6; + + let [lockbox_total, funding_streams_total]: [std::result::Result< + Amount, + amount::Error, + >; 2] = [&lockbox_streams, &funding_streams] + .map(|streams| streams.iter().map(|&(_, amount)| amount).sum()); + + // Use the same funding stream order as zcashd + funding_streams.sort_by_key(|(receiver, _funding_stream)| { + ZCASHD_FUNDING_STREAM_ORDER + .iter() + .position(|zcashd_receiver| zcashd_receiver == receiver) + }); - let (lockbox_streams, mut funding_streams): (Vec<_>, Vec<_>) = - funding_stream_values(height, &network, total_block_subsidy) - .map_error(server::error::LegacyCode::default())? 
+ // Format the funding streams and lockbox streams + let [funding_streams, lockbox_streams]: [Vec<_>; 2] = [funding_streams, lockbox_streams] + .map(|streams| { + streams .into_iter() - // Separate the funding streams into deferred and non-deferred streams - .partition(|(receiver, _)| matches!(receiver, FundingStreamReceiver::Deferred)); - - let is_nu6 = NetworkUpgrade::current(&network, height) == NetworkUpgrade::Nu6; - - let [lockbox_total, funding_streams_total]: [std::result::Result< - Amount, - amount::Error, - >; 2] = [&lockbox_streams, &funding_streams] - .map(|streams| streams.iter().map(|&(_, amount)| amount).sum()); - - // Use the same funding stream order as zcashd - funding_streams.sort_by_key(|(receiver, _funding_stream)| { - ZCASHD_FUNDING_STREAM_ORDER - .iter() - .position(|zcashd_receiver| zcashd_receiver == receiver) + .map(|(receiver, value)| { + let address = funding_stream_address(height, &network, receiver); + FundingStream::new(is_nu6, receiver, value, address) + }) + .collect() }); - // Format the funding streams and lockbox streams - let [funding_streams, lockbox_streams]: [Vec<_>; 2] = - [funding_streams, lockbox_streams].map(|streams| { - streams - .into_iter() - .map(|(receiver, value)| { - let address = funding_stream_address(height, &network, receiver); - FundingStream::new(is_nu6, receiver, value, address) - }) - .collect() - }); - - Ok(BlockSubsidy { - miner: miner_subsidy.into(), - founders: founders.into(), - funding_streams, - lockbox_streams, - funding_streams_total: funding_streams_total - .map_error(server::error::LegacyCode::default())? - .into(), - lockbox_total: lockbox_total - .map_error(server::error::LegacyCode::default())? - .into(), - total_block_subsidy: total_block_subsidy.into(), - }) - } - .boxed() + Ok(BlockSubsidy { + miner: miner_subsidy.into(), + founders: founders.into(), + funding_streams, + lockbox_streams, + funding_streams_total: funding_streams_total + .map_error(server::error::LegacyCode::default())? + .into(), + lockbox_total: lockbox_total + .map_error(server::error::LegacyCode::default())? + .into(), + total_block_subsidy: total_block_subsidy.into(), + }) } - fn get_difficulty(&self) -> BoxFuture> { + async fn get_difficulty(&self) -> Result { let network = self.network.clone(); let mut state = self.state.clone(); - async move { - let request = ReadRequest::ChainInfo; - - // # TODO - // - add a separate request like BestChainNextMedianTimePast, but skipping the - // consistency check, because any block's difficulty is ok for display - // - return 1.0 for a "not enough blocks in the state" error, like `zcashd`: - // - let response = state - .ready() - .and_then(|service| service.call(request)) - .await - .map_err(|error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - })?; - - let chain_info = match response { - ReadResponse::ChainInfo(info) => info, - _ => unreachable!("unmatched response to a chain info request"), - }; - - // This RPC is typically used for display purposes, so it is not consensus-critical. - // But it uses the difficulty consensus rules for its calculations. - // - // Consensus: - // https://zips.z.cash/protocol/protocol.pdf#nbits - // - // The zcashd implementation performs to_expanded() on f64, - // and then does an inverse division: - // https://github.com/zcash/zcash/blob/d6e2fada844373a8554ee085418e68de4b593a6c/src/rpc/blockchain.cpp#L46-L73 - // - // But in Zebra we divide the high 128 bits of each expanded difficulty. 
This gives - // a similar result, because the lower 128 bits are insignificant after conversion - // to `f64` with a 53-bit mantissa. - // - // `pow_limit >> 128 / difficulty >> 128` is the same as the work calculation - // `(2^256 / pow_limit) / (2^256 / difficulty)`, but it's a bit more accurate. - // - // To simplify the calculation, we don't scale for leading zeroes. (Bitcoin's - // difficulty currently uses 68 bits, so even it would still have full precision - // using this calculation.) - - // Get expanded difficulties (256 bits), these are the inverse of the work - let pow_limit: U256 = network.target_difficulty_limit().into(); - let difficulty: U256 = chain_info - .expected_difficulty - .to_expanded() - .expect("valid blocks have valid difficulties") - .into(); - - // Shift out the lower 128 bits (256 bits, but the top 128 are all zeroes) - let pow_limit = pow_limit >> 128; - let difficulty = difficulty >> 128; - - // Convert to u128 then f64. - // We could also convert U256 to String, then parse as f64, but that's slower. - let pow_limit = pow_limit.as_u128() as f64; - let difficulty = difficulty.as_u128() as f64; - - // Invert the division to give approximately: `work(difficulty) / work(pow_limit)` - Ok(pow_limit / difficulty) - } - .boxed() + let request = ReadRequest::ChainInfo; + + // # TODO + // - add a separate request like BestChainNextMedianTimePast, but skipping the + // consistency check, because any block's difficulty is ok for display + // - return 1.0 for a "not enough blocks in the state" error, like `zcashd`: + // + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; + + let chain_info = match response { + ReadResponse::ChainInfo(info) => info, + _ => unreachable!("unmatched response to a chain info request"), + }; + + // This RPC is typically used for display purposes, so it is not consensus-critical. + // But it uses the difficulty consensus rules for its calculations. + // + // Consensus: + // https://zips.z.cash/protocol/protocol.pdf#nbits + // + // The zcashd implementation performs to_expanded() on f64, + // and then does an inverse division: + // https://github.com/zcash/zcash/blob/d6e2fada844373a8554ee085418e68de4b593a6c/src/rpc/blockchain.cpp#L46-L73 + // + // But in Zebra we divide the high 128 bits of each expanded difficulty. This gives + // a similar result, because the lower 128 bits are insignificant after conversion + // to `f64` with a 53-bit mantissa. + // + // `pow_limit >> 128 / difficulty >> 128` is the same as the work calculation + // `(2^256 / pow_limit) / (2^256 / difficulty)`, but it's a bit more accurate. + // + // To simplify the calculation, we don't scale for leading zeroes. (Bitcoin's + // difficulty currently uses 68 bits, so even it would still have full precision + // using this calculation.) + + // Get expanded difficulties (256 bits), these are the inverse of the work + let pow_limit: U256 = network.target_difficulty_limit().into(); + let difficulty: U256 = chain_info + .expected_difficulty + .to_expanded() + .expect("valid blocks have valid difficulties") + .into(); + + // Shift out the lower 128 bits (256 bits, but the top 128 are all zeroes) + let pow_limit = pow_limit >> 128; + let difficulty = difficulty >> 128; + + // Convert to u128 then f64. + // We could also convert U256 to String, then parse as f64, but that's slower. 
+ let pow_limit = pow_limit.as_u128() as f64; + let difficulty = difficulty.as_u128() as f64; + + // Invert the division to give approximately: `work(difficulty) / work(pow_limit)` + Ok(pow_limit / difficulty) } - fn z_list_unified_receivers( - &self, - address: String, - ) -> BoxFuture> { + async fn z_list_unified_receivers(&self, address: String) -> Result { use zcash_address::unified::Container; - async move { - let (network, unified_address): ( - zcash_address::Network, - zcash_address::unified::Address, - ) = zcash_address::unified::Encoding::decode(address.clone().as_str()).map_err( - |error| Error { - code: ErrorCode::ServerError(0), - message: error.to_string(), - data: None, - }, - )?; + let (network, unified_address): (zcash_address::Network, zcash_address::unified::Address) = + zcash_address::unified::Encoding::decode(address.clone().as_str()) + .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?; - let mut p2pkh = String::new(); - let mut p2sh = String::new(); - let mut orchard = String::new(); - let mut sapling = String::new(); + let mut p2pkh = String::new(); + let mut p2sh = String::new(); + let mut orchard = String::new(); + let mut sapling = String::new(); - for item in unified_address.items() { - match item { - zcash_address::unified::Receiver::Orchard(_data) => { - let addr = zcash_address::unified::Address::try_from_items(vec![item]) - .expect("using data already decoded as valid"); - orchard = addr.encode(&network); - } - zcash_address::unified::Receiver::Sapling(data) => { - let addr = - zebra_chain::primitives::Address::try_from_sapling(network, data) - .expect("using data already decoded as valid"); - sapling = addr.payment_address().unwrap_or_default(); - } - zcash_address::unified::Receiver::P2pkh(data) => { - let addr = zebra_chain::primitives::Address::try_from_transparent_p2pkh( - network, data, - ) + for item in unified_address.items() { + match item { + zcash_address::unified::Receiver::Orchard(_data) => { + let addr = zcash_address::unified::Address::try_from_items(vec![item]) .expect("using data already decoded as valid"); - p2pkh = addr.payment_address().unwrap_or_default(); - } - zcash_address::unified::Receiver::P2sh(data) => { - let addr = zebra_chain::primitives::Address::try_from_transparent_p2sh( - network, data, - ) + orchard = addr.encode(&network); + } + zcash_address::unified::Receiver::Sapling(data) => { + let addr = zebra_chain::primitives::Address::try_from_sapling(network, data) .expect("using data already decoded as valid"); - p2sh = addr.payment_address().unwrap_or_default(); - } - _ => (), + sapling = addr.payment_address().unwrap_or_default(); + } + zcash_address::unified::Receiver::P2pkh(data) => { + let addr = + zebra_chain::primitives::Address::try_from_transparent_p2pkh(network, data) + .expect("using data already decoded as valid"); + p2pkh = addr.payment_address().unwrap_or_default(); + } + zcash_address::unified::Receiver::P2sh(data) => { + let addr = + zebra_chain::primitives::Address::try_from_transparent_p2sh(network, data) + .expect("using data already decoded as valid"); + p2sh = addr.payment_address().unwrap_or_default(); } + _ => (), } - - Ok(unified_address::Response::new( - orchard, sapling, p2pkh, p2sh, - )) } - .boxed() + + Ok(unified_address::Response::new( + orchard, sapling, p2pkh, p2sh, + )) } - fn generate(&self, num_blocks: u32) -> BoxFuture>> { + async fn generate(&self, num_blocks: u32) -> Result> { let rpc: GetBlockTemplateRpcImpl< Mempool, State, @@ -1432,54 +1365,50 @@ where > = 
self.clone(); let network = self.network.clone(); - async move { - if !network.is_regtest() { - return Err(Error { - code: ErrorCode::ServerError(0), - message: "generate is only supported on regtest".to_string(), - data: None, - }); - } + if !network.is_regtest() { + return Err(ErrorObject::borrowed( + 0, + "generate is only supported on regtest", + None, + )); + } - let mut block_hashes = Vec::new(); - for _ in 0..num_blocks { - let block_template = rpc - .get_block_template(None) - .await - .map_error(server::error::LegacyCode::default())?; - - let get_block_template::Response::TemplateMode(block_template) = block_template - else { - return Err(Error { - code: ErrorCode::ServerError(0), - message: "error generating block template".to_string(), - data: None, - }); - }; - - let proposal_block = proposal_block_from_template( - &block_template, - TimeSource::CurTime, - NetworkUpgrade::current(&network, Height(block_template.height)), - ) + let mut block_hashes = Vec::new(); + for _ in 0..num_blocks { + let block_template = rpc + .get_block_template(None) + .await .map_error(server::error::LegacyCode::default())?; - let hex_proposal_block = HexData( - proposal_block - .zcash_serialize_to_vec() - .map_error(server::error::LegacyCode::default())?, - ); - let _submit = rpc - .submit_block(hex_proposal_block, None) - .await - .map_error(server::error::LegacyCode::default())?; + let get_block_template::Response::TemplateMode(block_template) = block_template else { + return Err(ErrorObject::borrowed( + 0, + "error generating block template", + None, + )); + }; - block_hashes.push(GetBlockHash(proposal_block.hash())); - } + let proposal_block = proposal_block_from_template( + &block_template, + TimeSource::CurTime, + NetworkUpgrade::current(&network, Height(block_template.height)), + ) + .map_error(server::error::LegacyCode::default())?; + let hex_proposal_block = HexData( + proposal_block + .zcash_serialize_to_vec() + .map_error(server::error::LegacyCode::default())?, + ); - Ok(block_hashes) + let _submit = rpc + .submit_block(hex_proposal_block, None) + .await + .map_error(server::error::LegacyCode::default())?; + + block_hashes.push(GetBlockHash(proposal_block.hash())); } - .boxed() + + Ok(block_hashes) } } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs b/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs index 3fd4696980d..950dff5db5d 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/constants.rs @@ -1,6 +1,6 @@ //! Constant values used in mining rpcs methods. -use jsonrpc_core::ErrorCode; +use jsonrpsee_types::ErrorCode; use zebra_chain::{ block, diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index 3a934d629ff..baa0200db1f 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -2,7 +2,8 @@ use std::{collections::HashMap, iter, sync::Arc}; -use jsonrpc_core::{Error, ErrorCode, Result}; +use jsonrpsee::core::RpcResult as Result; +use jsonrpsee_types::{ErrorCode, ErrorObject}; use tower::{Service, ServiceExt}; use zebra_chain::{ @@ -61,25 +62,23 @@ pub fn check_parameters(parameters: &Option) -> Result<()> { mode: GetBlockTemplateRequestMode::Proposal, data: None, .. 
-        } => Err(Error {
-            code: ErrorCode::InvalidParams,
-            message: "\"data\" parameter must be \
-                provided in \"proposal\" mode"
-                .to_string(),
-            data: None,
-        }),
+        } => Err(ErrorObject::borrowed(
+            ErrorCode::InvalidParams.code(),
+            "\"data\" parameter must be \
+                provided in \"proposal\" mode",
+            None,
+        )),
         JsonParameters {
             mode: GetBlockTemplateRequestMode::Template,
             data: Some(_),
             ..
-        } => Err(Error {
-            code: ErrorCode::InvalidParams,
-            message: "\"data\" parameter must be \
-                omitted in \"template\" mode"
-                .to_string(),
-            data: None,
-        }),
+        } => Err(ErrorObject::borrowed(
+            ErrorCode::InvalidParams.code(),
+            "\"data\" parameter must be \
+                omitted in \"template\" mode",
+            None,
+        )),
     }
 }

@@ -131,11 +130,7 @@ where
     let block_verifier_router_response = block_verifier_router
         .ready()
         .await
-        .map_err(|error| Error {
-            code: ErrorCode::ServerError(0),
-            message: error.to_string(),
-            data: None,
-        })?
+        .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?
         .call(zebra_consensus::Request::CheckProposal(Arc::new(block)))
         .await;

@@ -189,16 +184,14 @@ where
             Hint: check your network connection, clock, and time zone settings."
         );

-        return Err(Error {
-            code: NOT_SYNCED_ERROR_CODE,
-            message: format!(
-                "Zebra has not synced to the chain tip, \
+        return Err(ErrorObject::borrowed(
+            NOT_SYNCED_ERROR_CODE.code(),
+            "Zebra has not synced to the chain tip, \
                 estimated distance: {estimated_distance_to_chain_tip:?}, \
                 local tip: {local_tip_height:?}. \
-                Hint: check your network connection, clock, and time zone settings."
-            ),
-            data: None,
-        });
+                Hint: check your network connection, clock, and time zone settings.",
+            None,
+        ));
     }

     Ok(())
@@ -227,11 +220,7 @@ where
     let response = state
         .oneshot(request.clone())
         .await
-        .map_err(|error| Error {
-            code: ErrorCode::ServerError(0),
-            message: error.to_string(),
-            data: None,
-        })?;
+        .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?;

     let chain_info = match response {
         zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info,
@@ -261,11 +250,7 @@ where
     let response = mempool
         .oneshot(mempool::Request::FullTransactions)
         .await
-        .map_err(|error| Error {
-            code: ErrorCode::ServerError(0),
-            message: error.to_string(),
-            data: None,
-        })?;
+        .map_err(|error| ErrorObject::owned(0, error.to_string(), None::<()>))?;

     // TODO: Order transactions in block templates based on their dependencies

diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs
index 21627d509db..1caa1593c27 100644
--- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs
+++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs
@@ -3,7 +3,7 @@
 use zebra_chain::parameters::Network;

 /// Response to a `getmininginfo` RPC request.
-#[derive(Debug, Default, PartialEq, Eq, serde::Serialize)]
+#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize)]
 pub struct Response {
     /// The current tip height.
     #[serde(rename = "blocks")]
diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs
index 2513af85aa6..cec806901bb 100644
--- a/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs
+++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/submit_block.rs
@@ -2,11 +2,11 @@

 // Allow doc links to these imports.
#[allow(unused_imports)] -use crate::methods::get_block_template_rpcs::GetBlockTemplateRpc; +use crate::methods::get_block_template_rpcs::GetBlockTemplate; /// Optional argument `jsonparametersobject` for `submitblock` RPC request /// -/// See notes for [`GetBlockTemplateRpc::submit_block`] method +/// See notes for [`crate::methods::GetBlockTemplateRpcServer::submit_block`] method #[derive(Clone, Debug, PartialEq, Eq, serde::Deserialize)] pub struct JsonParameters { /// The workid for the block template. Currently unused. @@ -28,7 +28,7 @@ pub struct JsonParameters { /// Response to a `submitblock` RPC request. /// /// Zebra never returns "duplicate-invalid", because it does not store invalid blocks. -#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[serde(rename_all = "kebab-case")] pub enum ErrorResponse { /// Block was already committed to the non-finalized or finalized state @@ -44,7 +44,7 @@ pub enum ErrorResponse { /// Response to a `submitblock` RPC request. /// /// Zebra never returns "duplicate-invalid", because it does not store invalid blocks. -#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[serde(untagged)] pub enum Response { /// Block was not successfully submitted, return error diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 0af5e03b0b9..8753d514c23 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -4,7 +4,7 @@ use std::{collections::HashSet, fmt::Debug, sync::Arc}; use futures::{join, FutureExt, TryFutureExt}; use hex::{FromHex, ToHex}; -use jsonrpc_core::{Error, ErrorCode}; +use jsonrpsee_types::{ErrorCode, ErrorObject}; use proptest::{collection::vec, prelude::*}; use thiserror::Error; use tokio::sync::oneshot; @@ -28,7 +28,7 @@ use zebra_test::mock_service::MockService; use crate::methods; use super::super::{ - AddressBalance, AddressStrings, NetworkUpgradeStatus, Rpc, RpcImpl, SentTransactionHash, + AddressBalance, AddressStrings, NetworkUpgradeStatus, RpcImpl, RpcServer, SentTransactionHash, }; proptest! { @@ -49,7 +49,7 @@ proptest! { let transaction_hex = hex::encode(&transaction_bytes); - let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex)); + let send_task = tokio::spawn(async move { rpc.send_raw_transaction(transaction_hex).await }); let unmined_transaction = UnminedTx::from(transaction); let expected_request = mempool::Request::Queue(vec![unmined_transaction.into()]); @@ -64,7 +64,7 @@ proptest! { state.expect_no_requests().await?; - let result = send_task.await?; + let result = send_task.await.expect("send_raw_transaction should not panic"); prop_assert_eq!(result, Ok(hash)); @@ -91,7 +91,9 @@ proptest! { let transaction_bytes = transaction.zcash_serialize_to_vec()?; let transaction_hex = hex::encode(&transaction_bytes); - let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex.clone())); + let _rpc = rpc.clone(); + let _transaction_hex = transaction_hex.clone(); + let send_task = tokio::spawn(async move { _rpc.send_raw_transaction(_transaction_hex).await }); let unmined_transaction = UnminedTx::from(transaction); let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]); @@ -103,11 +105,11 @@ proptest! 
{ state.expect_no_requests().await?; - let result = send_task.await?; + let result = send_task.await.expect("send_raw_transaction should not panic"); check_err_code(result, ErrorCode::ServerError(-1))?; - let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex)); + let send_task = tokio::spawn(async move { rpc.send_raw_transaction(transaction_hex.clone()).await }); let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]); @@ -118,7 +120,7 @@ proptest! { .await? .respond(Ok::<_, BoxError>(mempool::Response::Queued(vec![Ok(rsp_rx)]))); - let result = send_task.await?; + let result = send_task.await.expect("send_raw_transaction should not panic"); check_err_code(result, ErrorCode::ServerError(-25))?; @@ -173,13 +175,13 @@ proptest! { tokio::time::pause(); runtime.block_on(async move { - let send_task = tokio::spawn(rpc.send_raw_transaction(non_hex_string)); + let send_task = rpc.send_raw_transaction(non_hex_string); // Check that there are no further requests. mempool.expect_no_requests().await?; state.expect_no_requests().await?; - check_err_code(send_task.await?, ErrorCode::ServerError(-22))?; + check_err_code(send_task.await, ErrorCode::ServerError(-22))?; // The queue task should continue without errors or panics prop_assert!(mempool_tx_queue.now_or_never().is_none()); @@ -204,12 +206,12 @@ proptest! { prop_assume!(Transaction::zcash_deserialize(&*random_bytes).is_err()); runtime.block_on(async move { - let send_task = tokio::spawn(rpc.send_raw_transaction(hex::encode(random_bytes))); + let send_task = rpc.send_raw_transaction(hex::encode(random_bytes)); mempool.expect_no_requests().await?; state.expect_no_requests().await?; - check_err_code(send_task.await?, ErrorCode::ServerError(-22))?; + check_err_code(send_task.await, ErrorCode::ServerError(-22))?; // The queue task should continue without errors or panics prop_assert!(mempool_tx_queue.now_or_never().is_none()); @@ -374,8 +376,8 @@ proptest! { let (response, _) = tokio::join!(response_fut, mock_state_handler); prop_assert_eq!( - &response.err().unwrap().message, - "no chain tip available yet" + response.err().unwrap().message().to_string(), + "no chain tip available yet".to_string() ); mempool.expect_no_requests().await?; @@ -603,8 +605,10 @@ proptest! { let transaction_hash = tx.hash(); let tx_bytes = tx.zcash_serialize_to_vec()?; let tx_hex = hex::encode(&tx_bytes); - let send_task = tokio::spawn(rpc.send_raw_transaction(tx_hex)); - + let send_task = { + let rpc = rpc.clone(); + tokio::task::spawn(async move { rpc.send_raw_transaction(tx_hex).await }) + }; let tx_unmined = UnminedTx::from(tx); let expected_request = mempool::Request::Queue(vec![tx_unmined.clone().into()]); @@ -678,10 +682,11 @@ proptest! { runtime.block_on(async move { let mut transactions_hash_set = HashSet::new(); for tx in txs.clone() { + let rpc_clone = rpc.clone(); // send a transaction let tx_bytes = tx.zcash_serialize_to_vec()?; let tx_hex = hex::encode(&tx_bytes); - let send_task = tokio::spawn(rpc.send_raw_transaction(tx_hex)); + let send_task = tokio::task::spawn(async move { rpc_clone.send_raw_transaction(tx_hex).await }); let tx_unmined = UnminedTx::from(tx.clone()); let expected_request = mempool::Request::Queue(vec![tx_unmined.clone().into()]); @@ -768,11 +773,22 @@ fn invalid_txid() -> BoxedStrategy { } /// Checks that the given RPC response contains the given error code. 
-fn check_err_code(rsp: Result, error_code: ErrorCode) -> Result<(), TestCaseError> { - prop_assert!( - matches!(&rsp, Err(Error { code, .. }) if *code == error_code), - "the RPC response must match the error code: {error_code:?}" - ); +fn check_err_code( + rsp: Result, + error_code: ErrorCode, +) -> Result<(), TestCaseError> { + match rsp { + Err(e) => { + prop_assert!( + e.code() == error_code.code(), + "the RPC response must match the error code: {:?}", + error_code.code() + ); + } + Ok(_) => { + prop_assert!(false, "expected an error response, but got Ok"); + } + } Ok(()) } diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 2bdec5d7497..89ee464c70a 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -7,7 +7,9 @@ use std::{collections::BTreeMap, sync::Arc}; +use futures::FutureExt; use insta::dynamic_redaction; +use jsonrpsee::core::RpcResult as Result; use tower::buffer::Buffer; use zebra_chain::{ diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index 4949b419c43..a512faf7cfc 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -12,7 +12,7 @@ use std::{ use hex::FromHex; use insta::Settings; -use jsonrpc_core::Result; +use jsonrpsee::core::RpcResult as Result; use tower::{buffer::Buffer, Service}; use zebra_chain::{ @@ -47,7 +47,7 @@ use crate::methods::{ }, hex_data::HexData, tests::{snapshot::EXCESSIVE_BLOCK_HEIGHT, utils::fake_history_tree}, - GetBlockHash, GetBlockTemplateRpc, GetBlockTemplateRpcImpl, + GetBlockHash, GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, }; pub async fn test_responses( @@ -488,20 +488,18 @@ pub async fn test_responses( // `z_listunifiedreceivers` let ua1 = String::from("u1l8xunezsvhq8fgzfl7404m450nwnd76zshscn6nfys7vyz2ywyh4cc5daaq0c7q2su5lqfh23sp7fkf3kt27ve5948mzpfdvckzaect2jtte308mkwlycj2u0eac077wu70vqcetkxf"); - let z_list_unified_receivers = - tokio::spawn(get_block_template_rpc.z_list_unified_receivers(ua1)) - .await - .expect("unexpected panic in z_list_unified_receivers RPC task") - .expect("unexpected error in z_list_unified_receivers RPC call"); + let z_list_unified_receivers = get_block_template_rpc + .z_list_unified_receivers(ua1) + .await + .expect("unexpected error in z_list_unified_receivers RPC call"); snapshot_rpc_z_listunifiedreceivers("ua1", z_list_unified_receivers, &settings); let ua2 = String::from("u1uf4qsmh037x2jp6k042h9d2w22wfp39y9cqdf8kcg0gqnkma2gf4g80nucnfeyde8ev7a6kf0029gnwqsgadvaye9740gzzpmr67nfkjjvzef7rkwqunqga4u4jges4tgptcju5ysd0"); - let z_list_unified_receivers = - tokio::spawn(get_block_template_rpc.z_list_unified_receivers(ua2)) - .await - .expect("unexpected panic in z_list_unified_receivers RPC task") - .expect("unexpected error in z_list_unified_receivers RPC call"); + let z_list_unified_receivers = get_block_template_rpc + .z_list_unified_receivers(ua2) + .await + .expect("unexpected error in z_list_unified_receivers RPC call"); snapshot_rpc_z_listunifiedreceivers("ua2", z_list_unified_receivers, &settings); } diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 01ddb4c3d31..e1f559b8e4f 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -3,6 +3,7 @@ use std::ops::RangeInclusive; use std::sync::Arc; +use futures::FutureExt; use 
tower::buffer::Buffer; use zebra_chain::serialization::ZcashSerialize; @@ -495,7 +496,7 @@ async fn rpc_getblock_missing_error() { // Make sure Zebra returns the correct error code `-8` for missing blocks // https://github.com/zcash/lightwalletd/blob/v0.4.16/common/common.go#L287-L290 - let block_future = tokio::spawn(rpc.get_block("0".to_string(), Some(0u8))); + let block_future = tokio::spawn(async move { rpc.get_block("0".to_string(), Some(0u8)).await }); // Make the mock service respond with no block let response_handler = state @@ -503,11 +504,10 @@ async fn rpc_getblock_missing_error() { .await; response_handler.respond(zebra_state::ReadResponse::Block(None)); - let block_response = block_future.await; - let block_response = block_response - .expect("unexpected panic in spawned request future") - .expect_err("unexpected success from missing block state response"); - assert_eq!(block_response.code, ErrorCode::ServerError(-8),); + let block_response = block_future.await.expect("block future should not panic"); + let block_response = + block_response.expect_err("unexpected success from missing block state response"); + assert_eq!(block_response.code(), ErrorCode::ServerError(-8).code()); // Now check the error string the way `lightwalletd` checks it assert_eq!( @@ -898,7 +898,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { .await .unwrap_err(); - assert_eq!(rpc_rsp.code, ErrorCode::ServerError(-5)); + assert_eq!(rpc_rsp.code(), ErrorCode::ServerError(-5).code()); mempool.expect_no_requests().await; @@ -918,7 +918,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { .await .unwrap_err(); assert_eq!( - error.message, + error.message(), "start Height(2) must be less than or equal to end Height(1)".to_string() ); @@ -934,7 +934,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { .await .unwrap_err(); assert_eq!( - error.message, + error.message(), "start Height(0) and end Height(1) must both be greater than zero".to_string() ); @@ -950,7 +950,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { .await .unwrap_err(); assert_eq!( - error.message, + error.message(), "start Height(1) and end Height(11) must both be less than or equal to the chain tip Height(10)".to_string() ); @@ -1096,7 +1096,7 @@ async fn rpc_getaddressutxos_invalid_arguments() { .await .unwrap_err(); - assert_eq!(error.code, ErrorCode::ServerError(-5)); + assert_eq!(error.code(), ErrorCode::ServerError(-5).code()); mempool.expect_no_requests().await; state.expect_no_requests().await; @@ -1253,7 +1253,10 @@ async fn rpc_getblockcount_empty_state() { assert!(get_block_count.is_err()); // Check the error we got is the correct one - assert_eq!(get_block_count.err().unwrap().message, "No blocks in state"); + assert_eq!( + get_block_count.err().unwrap().message(), + "No blocks in state" + ); mempool.expect_no_requests().await; } @@ -1697,8 +1700,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .expect_err("needs an error when estimated distance to network chain tip is far"); assert_eq!( - get_block_template_sync_error.code, - ErrorCode::ServerError(-10) + get_block_template_sync_error.code(), + ErrorCode::ServerError(-10).code() ); mock_sync_status.set_is_close_to_tip(false); @@ -1710,8 +1713,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { .expect_err("needs an error when syncer is not close to tip"); assert_eq!( - get_block_template_sync_error.code, - ErrorCode::ServerError(-10) + get_block_template_sync_error.code(), + ErrorCode::ServerError(-10).code() ); 
     mock_chain_tip_sender.send_estimated_distance_to_network_chain_tip(Some(200));

@@ -1721,8 +1724,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) {
         .expect_err("needs an error when syncer is not close to tip or estimated distance to network chain tip is far");

     assert_eq!(
-        get_block_template_sync_error.code,
-        ErrorCode::ServerError(-10)
+        get_block_template_sync_error.code(),
+        ErrorCode::ServerError(-10).code()
     );

     let get_block_template_sync_error = get_block_template_rpc
@@ -1733,7 +1736,10 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) {
         .await
         .expect_err("needs an error when called in proposal mode without data");

-    assert_eq!(get_block_template_sync_error.code, ErrorCode::InvalidParams);
+    assert_eq!(
+        get_block_template_sync_error.code(),
+        ErrorCode::InvalidParams.code()
+    );

     let get_block_template_sync_error = get_block_template_rpc
         .get_block_template(Some(get_block_template::JsonParameters {
@@ -1743,7 +1749,10 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) {
         .await
         .expect_err("needs an error when passing in block data in template mode");

-    assert_eq!(get_block_template_sync_error.code, ErrorCode::InvalidParams);
+    assert_eq!(
+        get_block_template_sync_error.code(),
+        ErrorCode::InvalidParams.code()
+    );

     // The long poll id is valid, so it returns a state error instead
     let get_block_template_sync_error = get_block_template_rpc
@@ -1761,8 +1770,8 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) {
         .expect_err("needs an error when the state is empty");

     assert_eq!(
-        get_block_template_sync_error.code,
-        ErrorCode::ServerError(-10)
+        get_block_template_sync_error.code(),
+        ErrorCode::ServerError(-10).code()
     );

     // Try getting mempool transactions with a different tip hash
diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs
index 69ab36d8c00..c787071d74d 100644
--- a/zebra-rpc/src/server.rs
+++ b/zebra-rpc/src/server.rs
@@ -7,12 +7,11 @@
 //! See the full list of
 //! [Differences between JSON-RPC 1.0 and 2.0.](https://www.simple-is-better.org/rpc/#differences-between-1-0-and-2-0)

-use std::{fmt, panic, thread::available_parallelism};
+use std::{fmt, panic};

 use cookie::Cookie;
-use http_request_compatibility::With;
-use jsonrpc_core::{Compatibility, MetaIoHandler};
-use jsonrpc_http_server::{CloseHandle, ServerBuilder};
+use jsonrpsee::server::middleware::rpc::RpcServiceBuilder;
+use jsonrpsee::server::{Server, ServerHandle};
 use tokio::task::JoinHandle;
 use tower::Service;
 use tracing::*;
@@ -25,15 +24,15 @@ use zebra_node_services::mempool;

 use crate::{
     config::Config,
-    methods::{Rpc, RpcImpl},
+    methods::{RpcImpl, RpcServer as _},
     server::{
-        http_request_compatibility::HttpRequestMiddleware,
+        http_request_compatibility::HttpRequestMiddlewareLayer,
         rpc_call_compatibility::FixRpcResponseMiddleware,
     },
 };

 #[cfg(feature = "getblocktemplate-rpcs")]
-use crate::methods::{GetBlockTemplateRpc, GetBlockTemplateRpcImpl};
+use crate::methods::{GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer};

 pub mod cookie;
 pub mod error;
@@ -55,8 +54,8 @@ pub struct RpcServer {
     /// Zebra's application version, with build metadata.
     build_version: String,

-    /// A handle that shuts down the RPC server.
-    close_handle: CloseHandle,
+    /// A server handle used to shut down the RPC server.
+ close_handle: ServerHandle, } impl fmt::Debug for RpcServer { @@ -68,7 +67,7 @@ impl fmt::Debug for RpcServer { .field( "close_handle", // TODO: when it stabilises, use std::any::type_name_of_val(&self.close_handle) - &"CloseHandle", + &"ServerHandle", ) .finish() } @@ -77,6 +76,8 @@ impl fmt::Debug for RpcServer { /// The message to log when logging the RPC server's listen address pub const OPENED_RPC_ENDPOINT_MSG: &str = "Opened RPC endpoint at "; +type ServerTask = JoinHandle>; + impl RpcServer { /// Start a new RPC server endpoint using the supplied configs and services. /// @@ -90,7 +91,7 @@ impl RpcServer { // - put some of the configs or services in their own struct? // - replace VersionString with semver::Version, and update the tests to provide valid versions #[allow(clippy::too_many_arguments)] - pub fn spawn< + pub async fn spawn< VersionString, UserAgentString, Mempool, @@ -115,7 +116,7 @@ impl RpcServer { address_book: AddressBook, latest_chain_tip: Tip, network: Network, - ) -> (JoinHandle<()>, JoinHandle<()>, Option) + ) -> Result<(ServerTask, JoinHandle<()>), tower::BoxError> where VersionString: ToString + Clone + Send + 'static, UserAgentString: ToString + Clone + Send + 'static, @@ -150,136 +151,79 @@ impl RpcServer { SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { - if let Some(listen_addr) = config.listen_addr { - info!("Trying to open RPC endpoint at {}...", listen_addr,); - - // Create handler compatible with V1 and V2 RPC protocols - let mut io: MetaIoHandler<(), _> = - MetaIoHandler::new(Compatibility::Both, FixRpcResponseMiddleware); - + let listen_addr = config + .listen_addr + .expect("caller should make sure listen_addr is set"); + + #[cfg(feature = "getblocktemplate-rpcs")] + // Initialize the getblocktemplate rpc method handler + let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new( + &network, + mining_config.clone(), + mempool.clone(), + state.clone(), + latest_chain_tip.clone(), + block_verifier_router, + sync_status, + address_book, + ); + + // Initialize the rpc methods with the zebra version + let (rpc_impl, rpc_tx_queue_task_handle) = RpcImpl::new( + build_version.clone(), + user_agent, + network.clone(), + config.debug_force_finished_sync, #[cfg(feature = "getblocktemplate-rpcs")] - { - // Initialize the getblocktemplate rpc method handler - let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new( - &network, - mining_config.clone(), - mempool.clone(), - state.clone(), - latest_chain_tip.clone(), - block_verifier_router, - sync_status, - address_book, - ); - - io.extend_with(get_block_template_rpc_impl.to_delegate()); - } - - // Initialize the rpc methods with the zebra version - let (rpc_impl, rpc_tx_queue_task_handle) = RpcImpl::new( - build_version.clone(), - user_agent, - network.clone(), - config.debug_force_finished_sync, - #[cfg(feature = "getblocktemplate-rpcs")] - mining_config.debug_like_zcashd, - #[cfg(not(feature = "getblocktemplate-rpcs"))] - true, - mempool, - state, - latest_chain_tip, - ); - - io.extend_with(rpc_impl.to_delegate()); - - // If zero, automatically scale threads to the number of CPU cores - let mut parallel_cpu_threads = config.parallel_cpu_threads; - if parallel_cpu_threads == 0 { - parallel_cpu_threads = available_parallelism().map(usize::from).unwrap_or(1); - } - - // The server is a blocking task, which blocks on executor shutdown. - // So we need to start it in a std::thread. 
- // (Otherwise tokio panics on RPC port conflict, which shuts down the RPC server.) - let span = Span::current(); - let start_server = move || { - span.in_scope(|| { - let middleware = if config.enable_cookie_auth { - let cookie = Cookie::default(); - cookie::write_to_disk(&cookie, &config.cookie_dir) - .expect("Zebra must be able to write the auth cookie to the disk"); - HttpRequestMiddleware::default().with(cookie) - } else { - HttpRequestMiddleware::default() - }; - - // Use a different tokio executor from the rest of Zebra, - // so that large RPCs and any task handling bugs don't impact Zebra. - let server_instance = ServerBuilder::new(io) - .threads(parallel_cpu_threads) - // TODO: disable this security check if we see errors from lightwalletd - //.allowed_hosts(DomainsValidation::Disabled) - .request_middleware(middleware) - .start_http(&listen_addr) - .expect("Unable to start RPC server"); - - info!("{OPENED_RPC_ENDPOINT_MSG}{}", server_instance.address()); - - let close_handle = server_instance.close_handle(); - - let rpc_server_handle = RpcServer { - config, - network, - build_version: build_version.to_string(), - close_handle, - }; - - (server_instance, rpc_server_handle) - }) - }; - - // Propagate panics from the std::thread - let (server_instance, rpc_server_handle) = match std::thread::spawn(start_server).join() - { - Ok(rpc_server) => rpc_server, - Err(panic_object) => panic::resume_unwind(panic_object), - }; - - // The server is a blocking task, which blocks on executor shutdown. - // So we need to wait on it on a std::thread, inside a tokio blocking task. - // (Otherwise tokio panics when we shut down the RPC server.) - let span = Span::current(); - let wait_on_server = move || { - span.in_scope(|| { - server_instance.wait(); - - info!("Stopped RPC endpoint"); - }) - }; - - let span = Span::current(); - let rpc_server_task_handle = tokio::task::spawn_blocking(move || { - let thread_handle = std::thread::spawn(wait_on_server); - - // Propagate panics from the inner std::thread to the outer tokio blocking task - span.in_scope(|| match thread_handle.join() { - Ok(()) => (), - Err(panic_object) => panic::resume_unwind(panic_object), - }) - }); - - ( - rpc_server_task_handle, - rpc_tx_queue_task_handle, - Some(rpc_server_handle), - ) + mining_config.debug_like_zcashd, + #[cfg(not(feature = "getblocktemplate-rpcs"))] + true, + mempool, + state, + latest_chain_tip, + ); + + let http_middleware_layer = if config.enable_cookie_auth { + let cookie = Cookie::default(); + cookie::write_to_disk(&cookie, &config.cookie_dir) + .expect("Zebra must be able to write the auth cookie to the disk"); + HttpRequestMiddlewareLayer::new(Some(cookie)) } else { - // There is no RPC port, so the RPC tasks do nothing. 
- ( - tokio::task::spawn(futures::future::pending().in_current_span()), - tokio::task::spawn(futures::future::pending().in_current_span()), - None, - ) - } + HttpRequestMiddlewareLayer::new(None) + }; + + let http_middleware = tower::ServiceBuilder::new().layer(http_middleware_layer); + + let rpc_middleware = RpcServiceBuilder::new() + .rpc_logger(1024) + .layer_fn(FixRpcResponseMiddleware::new); + + let server_instance = Server::builder() + .http_only() + .set_http_middleware(http_middleware) + .set_rpc_middleware(rpc_middleware) + .build(listen_addr) + .await + .expect("Unable to start RPC server"); + let addr = server_instance + .local_addr() + .expect("Unable to get local address"); + info!("{OPENED_RPC_ENDPOINT_MSG}{}", addr); + + #[cfg(feature = "getblocktemplate-rpcs")] + let mut rpc_module = rpc_impl.into_rpc(); + #[cfg(not(feature = "getblocktemplate-rpcs"))] + let rpc_module = rpc_impl.into_rpc(); + #[cfg(feature = "getblocktemplate-rpcs")] + rpc_module + .merge(get_block_template_rpc_impl.into_rpc()) + .unwrap(); + + let server_task: JoinHandle> = tokio::spawn(async move { + server_instance.start(rpc_module).stopped().await; + Ok(()) + }); + Ok((server_task, rpc_tx_queue_task_handle)) } /// Shut down this RPC server, blocking the current thread. @@ -305,7 +249,7 @@ impl RpcServer { /// Shuts down this RPC server using its `close_handle`. /// /// See `shutdown_blocking()` for details. - fn shutdown_blocking_inner(close_handle: CloseHandle, config: Config) { + fn shutdown_blocking_inner(close_handle: ServerHandle, config: Config) { // The server is a blocking task, so it can't run inside a tokio thread. // See the note at wait_on_server. let span = Span::current(); @@ -321,7 +265,7 @@ impl RpcServer { } info!("Stopping RPC server"); - close_handle.clone().close(); + let _ = close_handle.stop(); debug!("Stopped RPC server"); }) }; diff --git a/zebra-rpc/src/server/error.rs b/zebra-rpc/src/server/error.rs index 4cfc7b38571..5130a16d533 100644 --- a/zebra-rpc/src/server/error.rs +++ b/zebra-rpc/src/server/error.rs @@ -1,4 +1,5 @@ //! RPC error codes & their handling. +use jsonrpsee_types::{ErrorCode, ErrorObject, ErrorObjectOwned}; /// Bitcoin RPC error codes /// @@ -51,22 +52,25 @@ pub enum LegacyCode { ClientInvalidIpOrSubnet = -30, } -impl From for jsonrpc_core::ErrorCode { +impl From for ErrorCode { fn from(code: LegacyCode) -> Self { - Self::ServerError(code as i64) + Self::ServerError(code as i32) } } -/// A trait for mapping errors to [`jsonrpc_core::Error`]. +impl From for i32 { + fn from(code: LegacyCode) -> Self { + code as i32 + } +} + +/// A trait for mapping errors to [`jsonrpsee_types::ErrorObjectOwned`]. pub(crate) trait MapError: Sized { - /// Maps errors to [`jsonrpc_core::Error`] with a specific error code. - fn map_error( - self, - code: impl Into, - ) -> std::result::Result; + /// Maps errors to [`jsonrpsee_types::ErrorObjectOwned`] with a specific error code. + fn map_error(self, code: impl Into) -> std::result::Result; - /// Maps errors to [`jsonrpc_core::Error`] with a [`LegacyCode::Misc`] error code. - fn map_misc_error(self) -> std::result::Result { + /// Maps errors to [`jsonrpsee_types::ErrorObjectOwned`] with a [`LegacyCode::Misc`] error code. + fn map_misc_error(self) -> std::result::Result { self.map_error(LegacyCode::Misc) } } @@ -77,15 +81,12 @@ pub(crate) trait OkOrError: Sized { /// message if conversion is to `Err`. 
fn ok_or_error( self, - code: impl Into, + code: impl Into, message: impl ToString, - ) -> std::result::Result; + ) -> std::result::Result; /// Converts the implementing type to `Result`, using a [`LegacyCode::Misc`] error code. - fn ok_or_misc_error( - self, - message: impl ToString, - ) -> std::result::Result { + fn ok_or_misc_error(self, message: impl ToString) -> std::result::Result { self.ok_or_error(LegacyCode::Misc, message) } } @@ -94,25 +95,21 @@ impl MapError for Result where E: ToString, { - fn map_error(self, code: impl Into) -> Result { - self.map_err(|error| jsonrpc_core::Error { - code: code.into(), - message: error.to_string(), - data: None, - }) + fn map_error(self, code: impl Into) -> Result { + self.map_err(|error| ErrorObject::owned(code.into().code(), error.to_string(), None::<()>)) } } impl OkOrError for Option { fn ok_or_error( self, - code: impl Into, + code: impl Into, message: impl ToString, - ) -> Result { - self.ok_or(jsonrpc_core::Error { - code: code.into(), - message: message.to_string(), - data: None, - }) + ) -> Result { + self.ok_or(ErrorObject::owned( + code.into().code(), + message.to_string(), + None::<()>, + )) } } diff --git a/zebra-rpc/src/server/http_request_compatibility.rs b/zebra-rpc/src/server/http_request_compatibility.rs index 89925c229b8..ebbf49c05d3 100644 --- a/zebra-rpc/src/server/http_request_compatibility.rs +++ b/zebra-rpc/src/server/http_request_compatibility.rs @@ -2,16 +2,25 @@ //! //! These fixes are applied at the HTTP level, before the RPC request is parsed. -use base64::{engine::general_purpose::URL_SAFE, Engine as _}; -use futures::TryStreamExt; -use jsonrpc_http_server::{ - hyper::{body::Bytes, header, Body, Request}, - RequestMiddleware, RequestMiddlewareAction, +use std::future::Future; + +use std::pin::Pin; + +use futures::{future, FutureExt}; +use http_body_util::BodyExt; +use hyper::{body::Bytes, header}; +use jsonrpsee::{ + core::BoxError, + server::{HttpBody, HttpRequest, HttpResponse}, }; +use jsonrpsee_types::ErrorObject; +use tower::Service; use super::cookie::Cookie; -/// HTTP [`RequestMiddleware`] with compatibility workarounds. +use base64::{engine::general_purpose::URL_SAFE, Engine as _}; + +/// HTTP [`HttpRequestMiddleware`] with compatibility workarounds. /// /// This middleware makes the following changes to HTTP requests: /// @@ -25,7 +34,7 @@ use super::cookie::Cookie; /// ### Add missing `content-type` HTTP header /// /// Some RPC clients don't include a `content-type` HTTP header. -/// But unlike web browsers, [`jsonrpc_http_server`] does not do content sniffing. +/// But unlike web browsers, [`jsonrpsee`] does not do content sniffing. /// /// If there is no `content-type` header, we assume the content is JSON, /// and let the parser error if we are incorrect. @@ -42,103 +51,30 @@ use super::cookie::Cookie; /// Any user-specified data in RPC requests is hex or base58check encoded. /// We assume lightwalletd validates data encodings before sending it on to Zebra. /// So any fixes Zebra performs won't change user-specified data. -#[derive(Clone, Debug, Default)] -pub struct HttpRequestMiddleware { +#[derive(Clone, Debug)] +pub struct HttpRequestMiddleware { + service: S, cookie: Option, } -/// A trait for updating an object, consuming it and returning the updated version. -pub trait With { - /// Updates `self` with an instance of type `T` and returns the updated version of `self`. 
- fn with(self, _: T) -> Self; -} - -impl With for HttpRequestMiddleware { - fn with(mut self, cookie: Cookie) -> Self { - self.cookie = Some(cookie); - self - } -} - -impl RequestMiddleware for HttpRequestMiddleware { - fn on_request(&self, mut request: Request) -> RequestMiddlewareAction { - tracing::trace!(?request, "original HTTP request"); - - // Check if the request is authenticated - if !self.check_credentials(request.headers_mut()) { - let error = jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::ServerError(401), - message: "unauthenticated method".to_string(), - data: None, - }; - return jsonrpc_http_server::Response { - code: jsonrpc_http_server::hyper::StatusCode::from_u16(401) - .expect("hard-coded status code should be valid"), - content_type: header::HeaderValue::from_static("application/json; charset=utf-8"), - content: serde_json::to_string(&jsonrpc_core::Response::from(error, None)) - .expect("hard-coded result should serialize"), - } - .into(); - } - - // Fix the request headers if needed and we can do so. - HttpRequestMiddleware::insert_or_replace_content_type_header(request.headers_mut()); - - // Fix the request body - let request = request.map(|body| { - let body = body.map_ok(|data| { - // To simplify data handling, we assume that any search strings won't be split - // across multiple `Bytes` data buffers. - // - // To simplify error handling, Zebra only supports valid UTF-8 requests, - // and uses lossy UTF-8 conversion. - // - // JSON-RPC requires all requests to be valid UTF-8. - // The lower layers should reject invalid requests with lossy changes. - // But if they accept some lossy changes, that's ok, - // because the request was non-standard anyway. - // - // We're not concerned about performance here, so we just clone the Cow - let data = String::from_utf8_lossy(data.as_ref()).to_string(); - - // Fix up the request. - let data = Self::remove_json_1_fields(data); - - Bytes::from(data) - }); - - Body::wrap_stream(body) - }); - - tracing::trace!(?request, "modified HTTP request"); - - RequestMiddlewareAction::Proceed { - // TODO: disable this security check if we see errors from lightwalletd. - should_continue_on_invalid_cors: false, - request, - } +impl HttpRequestMiddleware { + /// Create a new `HttpRequestMiddleware` with the given service and cookie. + pub fn new(service: S, cookie: Option) -> Self { + Self { service, cookie } } -} -impl HttpRequestMiddleware { - /// Remove any "jsonrpc: 1.0" fields in `data`, and return the resulting string. - pub fn remove_json_1_fields(data: String) -> String { - // Replace "jsonrpc = 1.0": - // - at the start or middle of a list, and - // - at the end of a list; - // with no spaces (lightwalletd format), and spaces after separators (example format). - // - // TODO: if we see errors from lightwalletd, make this replacement more accurate: - // - use a partial JSON fragment parser - // - combine the whole request into a single buffer, and use a JSON parser - // - use a regular expression - // - // We could also just handle the exact lightwalletd format, - // by replacing `{"jsonrpc":"1.0",` with `{`. - data.replace("\"jsonrpc\":\"1.0\",", "") - .replace("\"jsonrpc\": \"1.0\",", "") - .replace(",\"jsonrpc\":\"1.0\"", "") - .replace(", \"jsonrpc\": \"1.0\"", "") + /// Check if the request is authenticated. 
+ pub fn check_credentials(&self, headers: &header::HeaderMap) -> bool { + self.cookie.as_ref().map_or(true, |internal_cookie| { + headers + .get(header::AUTHORIZATION) + .and_then(|auth_header| auth_header.to_str().ok()) + .and_then(|auth_header| auth_header.split_whitespace().nth(1)) + .and_then(|encoded| URL_SAFE.decode(encoded).ok()) + .and_then(|decoded| String::from_utf8(decoded).ok()) + .and_then(|request_cookie| request_cookie.split(':').nth(1).map(String::from)) + .map_or(false, |passwd| internal_cookie.authenticate(passwd)) + }) } /// Insert or replace client supplied `content-type` HTTP header to `application/json` in the following cases: @@ -182,17 +118,110 @@ impl HttpRequestMiddleware { } } - /// Check if the request is authenticated. - pub fn check_credentials(&self, headers: &header::HeaderMap) -> bool { - self.cookie.as_ref().map_or(true, |internal_cookie| { - headers - .get(header::AUTHORIZATION) - .and_then(|auth_header| auth_header.to_str().ok()) - .and_then(|auth_header| auth_header.split_whitespace().nth(1)) - .and_then(|encoded| URL_SAFE.decode(encoded).ok()) - .and_then(|decoded| String::from_utf8(decoded).ok()) - .and_then(|request_cookie| request_cookie.split(':').nth(1).map(String::from)) - .map_or(false, |passwd| internal_cookie.authenticate(passwd)) - }) + /// Remove any "jsonrpc: 1.0" fields in `data`, and return the resulting string. + pub fn remove_json_1_fields(data: String) -> String { + // Replace "jsonrpc = 1.0": + // - at the start or middle of a list, and + // - at the end of a list; + // with no spaces (lightwalletd format), and spaces after separators (example format). + // + // TODO: if we see errors from lightwalletd, make this replacement more accurate: + // - use a partial JSON fragment parser + // - combine the whole request into a single buffer, and use a JSON parser + // - use a regular expression + // + // We could also just handle the exact lightwalletd format, + // by replacing `{"jsonrpc":"1.0",` with `{"jsonrpc":"2.0`. + data.replace("\"jsonrpc\":\"1.0\",", "\"jsonrpc\":\"2.0\",") + .replace("\"jsonrpc\": \"1.0\",", "\"jsonrpc\": \"2.0\",") + .replace(",\"jsonrpc\":\"1.0\"", ",\"jsonrpc\":\"2.0\"") + .replace(", \"jsonrpc\": \"1.0\"", ", \"jsonrpc\": \"2.0\"") + } +} + +/// Implement the Layer for HttpRequestMiddleware to allow injecting the cookie +#[derive(Clone)] +pub struct HttpRequestMiddlewareLayer { + cookie: Option, +} + +impl HttpRequestMiddlewareLayer { + /// Create a new `HttpRequestMiddlewareLayer` with the given cookie. + pub fn new(cookie: Option) -> Self { + Self { cookie } + } +} + +impl tower::Layer for HttpRequestMiddlewareLayer { + type Service = HttpRequestMiddleware; + + fn layer(&self, service: S) -> Self::Service { + HttpRequestMiddleware::new(service, self.cookie.clone()) + } +} + +/// A trait for updating an object, consuming it and returning the updated version. +pub trait With { + /// Updates `self` with an instance of type `T` and returns the updated version of `self`. 
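// --- Editor's note: illustrative sketch, not part of the diff. ---
// What the rewritten `remove_json_1_fields` now does: instead of stripping the
// lightwalletd-style `"jsonrpc":"1.0"` field, it upgrades it to `"2.0"` so the
// jsonrpsee parser accepts the request. The turbofish below assumes the middleware
// stays generic over its inner service, as the `service: S` field above implies.
fn jsonrpc_1_0_fixup_example() {
    let body = r#"{"jsonrpc":"1.0","id":"1","method":"getinfo","params":[]}"#.to_string();

    let fixed = HttpRequestMiddleware::<()>::remove_json_1_fields(body);

    assert_eq!(
        fixed,
        r#"{"jsonrpc":"2.0","id":"1","method":"getinfo","params":[]}"#
    );
}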
+ fn with(self, _: T) -> Self; +} + +impl With for HttpRequestMiddleware { + fn with(mut self, cookie: Cookie) -> Self { + self.cookie = Some(cookie); + self + } +} + +impl Service> for HttpRequestMiddleware +where + S: Service + std::clone::Clone + Send + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, +{ + type Response = S::Response; + type Error = BoxError; + type Future = + Pin> + Send + 'static>>; + + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.service.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, mut request: HttpRequest) -> Self::Future { + // Check if the request is authenticated + if !self.check_credentials(request.headers_mut()) { + let error = ErrorObject::borrowed(401, "unauthenticated method", None); + // TODO: Error object is not being returned to the user but an empty response. + return future::err(BoxError::from(error)).boxed(); + } + + // Fix the request headers. + Self::insert_or_replace_content_type_header(request.headers_mut()); + + let mut service = self.service.clone(); + let (parts, body) = request.into_parts(); + + async move { + let bytes = body + .collect() + .await + .expect("Failed to collect body data") + .to_bytes(); + + let data = String::from_utf8_lossy(bytes.as_ref()).to_string(); + + // Fix JSON-RPC 1.0 requests. + let data = Self::remove_json_1_fields(data); + let body = HttpBody::from(Bytes::from(data).as_ref().to_vec()); + + let request = HttpRequest::from_parts(parts, body); + + service.call(request).await.map_err(Into::into) + } + .boxed() } } diff --git a/zebra-rpc/src/server/rpc_call_compatibility.rs b/zebra-rpc/src/server/rpc_call_compatibility.rs index 209596180c0..2bd22b72924 100644 --- a/zebra-rpc/src/server/rpc_call_compatibility.rs +++ b/zebra-rpc/src/server/rpc_call_compatibility.rs @@ -3,116 +3,66 @@ //! These fixes are applied at the JSON-RPC call level, //! after the RPC request is parsed and split into calls. -use std::future::Future; - -use futures::future::{Either, FutureExt}; - -use jsonrpc_core::{ - middleware::Middleware, - types::{Call, Failure, Output, Response}, - BoxFuture, Metadata, MethodCall, Notification, +use jsonrpsee::{ + server::middleware::rpc::{layer::ResponseFuture, RpcService, RpcServiceT}, + MethodResponse, }; +use jsonrpsee_types::ErrorObject; -use crate::server; - -/// JSON-RPC [`Middleware`] with compatibility workarounds. +/// JSON-RPC [`FixRpcResponseMiddleware`] with compatibility workarounds. /// /// This middleware makes the following changes to JSON-RPC calls: /// /// ## Make RPC framework response codes match `zcashd` /// -/// [`jsonrpc_core`] returns specific error codes while parsing requests: -/// +/// [`jsonrpsee_types`] returns specific error codes while parsing requests: +/// /// /// But these codes are different from `zcashd`, and some RPC clients rely on the exact code. -/// -/// ## Read-Only Functionality -/// -/// This middleware also logs unrecognized RPC requests. 
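// --- Editor's note: illustrative sketch, not part of the diff. ---
// The Authorization header shape that `check_credentials` accepts: the second
// whitespace-separated token is base64-decoded with the URL-safE alphabet used
// above, and everything after the first ':' is compared against the cookie.
// The "__cookie__" user name and the password below are made-up examples; the
// user-name part is ignored by the check.
use base64::{engine::general_purpose::URL_SAFE, Engine as _};

fn example_authorization_header() -> String {
    let cookie_credentials = "__cookie__:hypothetical-cookie-password";
    format!("Basic {}", URL_SAFE.encode(cookie_credentials))
}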
-pub struct FixRpcResponseMiddleware; - -impl Middleware for FixRpcResponseMiddleware { - type Future = BoxFuture>; - type CallFuture = BoxFuture>; - - fn on_call( - &self, - call: Call, - meta: M, - next: Next, - ) -> Either - where - Next: Fn(Call, M) -> NextFuture + Send + Sync, - NextFuture: Future> + Send + 'static, - { - Either::Left( - next(call.clone(), meta) - .map(|mut output| { - Self::fix_error_codes(&mut output); - output - }) - .inspect(|output| Self::log_if_error(output, call)) - .boxed(), - ) - } +/// Specifically, the [`jsonrpsee_types::error::INVALID_PARAMS_CODE`] is different: +/// +pub struct FixRpcResponseMiddleware { + service: RpcService, } impl FixRpcResponseMiddleware { - /// Replaces [`jsonrpc_core::ErrorCode`]s in the [`Output`] with their `zcashd` equivalents. - /// - /// ## Replaced Codes - /// - /// 1. [`jsonrpc_core::ErrorCode::InvalidParams`] -> [`server::error::LegacyCode::Misc`] - /// Rationale: - /// The `node-stratum-pool` mining pool library expects error code `-1` to detect available RPC methods: - /// - fn fix_error_codes(output: &mut Option) { - if let Some(Output::Failure(Failure { ref mut error, .. })) = output { - if matches!(error.code, jsonrpc_core::ErrorCode::InvalidParams) { - let original_code = error.code.clone(); - - error.code = server::error::LegacyCode::Misc.into(); - tracing::debug!("Replacing RPC error: {original_code:?} with {error}"); - } - } + /// Create a new `FixRpcResponseMiddleware` with the given `service`. + pub fn new(service: RpcService) -> Self { + Self { service } } +} - /// Obtain a description string for a received request. - /// - /// Prints out only the method name and the received parameters. - fn call_description(call: &Call) -> String { - const MAX_PARAMS_LOG_LENGTH: usize = 100; +impl<'a> RpcServiceT<'a> for FixRpcResponseMiddleware { + type Future = ResponseFuture>; - match call { - Call::MethodCall(MethodCall { method, params, .. }) => { - let mut params = format!("{params:?}"); - if params.len() >= MAX_PARAMS_LOG_LENGTH { - params.truncate(MAX_PARAMS_LOG_LENGTH); - params.push_str("..."); - } + fn call(&self, request: jsonrpsee::types::Request<'a>) -> Self::Future { + let service = self.service.clone(); + ResponseFuture::future(Box::pin(async move { + let response = service.call(request).await; + if response.is_error() { + let original_error_code = response + .as_error_code() + .expect("response should have an error code"); + if original_error_code == jsonrpsee_types::ErrorCode::InvalidParams.code() { + let new_error_code = crate::server::error::LegacyCode::Misc.into(); + tracing::debug!( + "Replacing RPC error: {original_error_code} with {new_error_code}" + ); + let json: serde_json::Value = + serde_json::from_str(response.into_parts().0.as_str()) + .expect("response string should be valid json"); + let id = json["id"] + .as_str() + .expect("response json should have an id") + .to_string(); - format!(r#"method = {method:?}, params = {params}"#) - } - Call::Notification(Notification { method, params, .. }) => { - let mut params = format!("{params:?}"); - if params.len() >= MAX_PARAMS_LOG_LENGTH { - params.truncate(MAX_PARAMS_LOG_LENGTH); - params.push_str("..."); + return MethodResponse::error( + jsonrpsee_types::Id::Str(id.into()), + ErrorObject::borrowed(new_error_code, "Invalid params", None), + ); } - - format!(r#"notification = {method:?}, params = {params}"#) } - Call::Invalid { .. } => "invalid request".to_owned(), - } - } - - /// Check RPC output and log any errors. 
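// --- Editor's note: illustrative sketch, not part of the diff. ---
// The observable effect of `FixRpcResponseMiddleware`: jsonrpsee reports bad
// parameters with the standard JSON-RPC code -32602, but zcashd-compatible
// clients (for example `node-stratum-pool`, per the rationale removed above)
// probe for available methods by looking for -1, so the middleware rewrites
// the code before the response is sent.
fn invalid_params_rewrite_example() {
    let jsonrpsee_code = jsonrpsee_types::ErrorCode::InvalidParams.code();
    let zcashd_code: i32 = crate::server::error::LegacyCode::Misc.into();

    assert_eq!(jsonrpsee_code, -32602);
    assert_eq!(zcashd_code, -1); // zcashd's RPC_MISC_ERROR
}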
- // - // TODO: do we want to ignore ErrorCode::ServerError(_), or log it at debug? - fn log_if_error(output: &Option, call: Call) { - if let Some(Output::Failure(Failure { error, .. })) = output { - let call_description = Self::call_description(&call); - tracing::info!("RPC error: {error} in call: {call_description}"); - } + response + })) } } diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index 8ffc3386a0d..bf850661a09 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -3,12 +3,8 @@ // These tests call functions which can take unit arguments if some features aren't enabled. #![allow(clippy::unit_arg)] -use std::{ - net::{Ipv4Addr, SocketAddrV4}, - time::Duration, -}; +use std::net::{Ipv4Addr, SocketAddrV4}; -use futures::FutureExt; use tower::buffer::Buffer; use zebra_chain::{ @@ -21,111 +17,71 @@ use zebra_test::mock_service::MockService; use super::super::*; -/// Test that the JSON-RPC server spawns when configured with a single thread. -#[test] -fn rpc_server_spawn_single_thread() { - rpc_server_spawn(false) -} - -/// Test that the JSON-RPC server spawns when configured with multiple threads. -#[test] -#[cfg(not(target_os = "windows"))] -fn rpc_server_spawn_parallel_threads() { - rpc_server_spawn(true) +/// Test that the JSON-RPC server spawns. +#[tokio::test] +async fn rpc_server_spawn_test() { + rpc_server_spawn().await } /// Test if the RPC server will spawn on a randomly generated port. -/// -/// Set `parallel_cpu_threads` to true to auto-configure based on the number of CPU cores. #[tracing::instrument] -fn rpc_server_spawn(parallel_cpu_threads: bool) { +async fn rpc_server_spawn() { let _init_guard = zebra_test::init(); let config = Config { listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0).into()), indexer_listen_addr: None, - parallel_cpu_threads: if parallel_cpu_threads { 2 } else { 1 }, + parallel_cpu_threads: 0, debug_force_finished_sync: false, cookie_dir: Default::default(), enable_cookie_auth: false, }; - let rt = tokio::runtime::Runtime::new().unwrap(); - - rt.block_on(async { - let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut block_verifier_router: MockService<_, _, _, BoxError> = - MockService::build().for_unit_tests(); - - info!("spawning RPC server..."); - - let (rpc_server_task_handle, rpc_tx_queue_task_handle, _rpc_server) = RpcServer::spawn( - config, - Default::default(), - "RPC server test", - "RPC server test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - info!("spawned RPC server, checking services..."); - - mempool.expect_no_requests().await; - state.expect_no_requests().await; - block_verifier_router.expect_no_requests().await; - - // The server and queue tasks should continue without errors or panics - let rpc_server_task_result = rpc_server_task_handle.now_or_never(); - assert!(rpc_server_task_result.is_none()); - - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(rpc_tx_queue_task_result.is_none()); - }); - - info!("waiting for RPC server to shut down..."); - rt.shutdown_timeout(Duration::from_secs(1)); -} - -/// Test that the JSON-RPC server spawns when configured with a single thread, -/// on an OS-assigned 
unallocated port. -#[test] -fn rpc_server_spawn_unallocated_port_single_thread() { - rpc_server_spawn_unallocated_port(false, false) -} - -/// Test that the JSON-RPC server spawns and shuts down when configured with a single thread, -/// on an OS-assigned unallocated port. -#[test] -fn rpc_server_spawn_unallocated_port_single_thread_shutdown() { - rpc_server_spawn_unallocated_port(false, true) + let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut block_verifier_router: MockService<_, _, _, BoxError> = + MockService::build().for_unit_tests(); + + info!("spawning RPC server..."); + + let _rpc_server_task_handle = RpcServer::spawn( + config, + Default::default(), + "RPC server test", + "RPC server test", + Buffer::new(mempool.clone(), 1), + Buffer::new(state.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), + MockSyncStatus::default(), + MockAddressBookPeers::default(), + NoChainTip, + Mainnet, + ); + + info!("spawned RPC server, checking services..."); + + mempool.expect_no_requests().await; + state.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; } -/// Test that the JSON-RPC server spawns when configured with multiple threads, -/// on an OS-assigned unallocated port. -#[test] -fn rpc_sever_spawn_unallocated_port_parallel_threads() { - rpc_server_spawn_unallocated_port(true, false) +/// Test that the JSON-RPC server spawns on an OS-assigned unallocated port. +#[tokio::test] +async fn rpc_server_spawn_unallocated_port() { + rpc_spawn_unallocated_port(false).await } -/// Test that the JSON-RPC server spawns and shuts down when configured with multiple threads, -/// on an OS-assigned unallocated port. -#[test] -fn rpc_sever_spawn_unallocated_port_parallel_threads_shutdown() { - rpc_server_spawn_unallocated_port(true, true) +/// Test that the JSON-RPC server spawns and shuts down on an OS-assigned unallocated port. +#[tokio::test] +async fn rpc_server_spawn_unallocated_port_shutdown() { + rpc_spawn_unallocated_port(true).await } /// Test if the RPC server will spawn on an OS-assigned unallocated port. /// -/// Set `parallel_cpu_threads` to true to auto-configure based on the number of CPU cores, -/// and `do_shutdown` to true to close the server using the close handle. +/// Set `do_shutdown` to true to close the server using the close handle. 
#[tracing::instrument] -fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bool) { +async fn rpc_spawn_unallocated_port(do_shutdown: bool) { let _init_guard = zebra_test::init(); let port = zebra_test::net::random_unallocated_port(); @@ -134,300 +90,111 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo let config = Config { listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into()), indexer_listen_addr: None, - parallel_cpu_threads: if parallel_cpu_threads { 0 } else { 1 }, + parallel_cpu_threads: 0, debug_force_finished_sync: false, cookie_dir: Default::default(), enable_cookie_auth: false, }; - let rt = tokio::runtime::Runtime::new().unwrap(); - - rt.block_on(async { - let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut block_verifier_router: MockService<_, _, _, BoxError> = - MockService::build().for_unit_tests(); - - info!("spawning RPC server..."); - - let (rpc_server_task_handle, rpc_tx_queue_task_handle, rpc_server) = RpcServer::spawn( - config, - Default::default(), - "RPC server test", - "RPC server test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - info!("spawned RPC server, checking services..."); - - mempool.expect_no_requests().await; - state.expect_no_requests().await; - block_verifier_router.expect_no_requests().await; - - if do_shutdown { - rpc_server - .expect("unexpected missing RpcServer for configured RPC port") - .shutdown() - .await - .expect("unexpected panic during RpcServer shutdown"); - - // The server and queue tasks should shut down without errors or panics - let rpc_server_task_result = rpc_server_task_handle.await; - assert!( - matches!(rpc_server_task_result, Ok(())), - "unexpected server task panic during shutdown: {rpc_server_task_result:?}" - ); - - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.await; - assert!( - matches!(rpc_tx_queue_task_result, Ok(())), - "unexpected queue task panic during shutdown: {rpc_tx_queue_task_result:?}" - ); - } else { - // The server and queue tasks should continue without errors or panics - let rpc_server_task_result = rpc_server_task_handle.now_or_never(); - assert!(rpc_server_task_result.is_none()); - - let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(rpc_tx_queue_task_result.is_none()); - } - }); - - info!("waiting for RPC server to shut down..."); - rt.shutdown_timeout(Duration::from_secs(1)); + let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut block_verifier_router: MockService<_, _, _, BoxError> = + MockService::build().for_unit_tests(); + + info!("spawning RPC server..."); + + let rpc_server_task_handle = RpcServer::spawn( + config, + Default::default(), + "RPC server test", + "RPC server test", + Buffer::new(mempool.clone(), 1), + Buffer::new(state.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), + MockSyncStatus::default(), + MockAddressBookPeers::default(), + NoChainTip, + Mainnet, + ) + .await + .expect(""); + + info!("spawned RPC server, checking services..."); + + mempool.expect_no_requests().await; + state.expect_no_requests().await; + 
block_verifier_router.expect_no_requests().await; + + if do_shutdown { + rpc_server_task_handle.0.abort(); + } } /// Test if the RPC server will panic correctly when there is a port conflict. /// /// This test is sometimes unreliable on Windows, and hangs on macOS. /// We believe this is a CI infrastructure issue, not a platform-specific issue. -#[test] +#[tokio::test] #[should_panic(expected = "Unable to start RPC server")] #[cfg(not(any(target_os = "windows", target_os = "macos")))] -fn rpc_server_spawn_port_conflict() { +async fn rpc_server_spawn_port_conflict() { + use std::time::Duration; let _init_guard = zebra_test::init(); let port = zebra_test::net::random_known_port(); let config = Config { listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into()), indexer_listen_addr: None, - parallel_cpu_threads: 1, debug_force_finished_sync: false, + parallel_cpu_threads: 0, cookie_dir: Default::default(), enable_cookie_auth: false, }; - let rt = tokio::runtime::Runtime::new().unwrap(); - - let test_task_handle = rt.spawn(async { - let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut block_verifier_router: MockService<_, _, _, BoxError> = - MockService::build().for_unit_tests(); - - info!("spawning RPC server 1..."); - - let (_rpc_server_1_task_handle, _rpc_tx_queue_1_task_handle, _rpc_server) = - RpcServer::spawn( - config.clone(), - Default::default(), - "RPC server 1 test", - "RPC server 1 test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - tokio::time::sleep(Duration::from_secs(3)).await; - - info!("spawning conflicted RPC server 2..."); - - let (rpc_server_2_task_handle, _rpc_tx_queue_2_task_handle, _rpc_server) = RpcServer::spawn( - config, - Default::default(), - "RPC server 2 conflict test", - "RPC server 2 conflict test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - info!("spawned RPC servers, checking services..."); - - mempool.expect_no_requests().await; - state.expect_no_requests().await; - block_verifier_router.expect_no_requests().await; - - // Because there is a panic inside a multi-threaded executor, - // we can't depend on the exact behaviour of the other tasks, - // particularly across different machines and OSes. 
- - // The second server should panic, so its task handle should return the panic - let rpc_server_2_task_result = rpc_server_2_task_handle.await; - match rpc_server_2_task_result { - Ok(()) => panic!( - "RPC server with conflicting port should exit with an error: \ - unexpected Ok result" - ), - Err(join_error) => match join_error.try_into_panic() { - Ok(panic_object) => panic::resume_unwind(panic_object), - Err(cancelled_error) => panic!( - "RPC server with conflicting port should exit with an error: \ - unexpected JoinError: {cancelled_error:?}" - ), - }, - } - - // Ignore the queue task result - }); - - // Wait until the spawned task finishes - std::thread::sleep(Duration::from_secs(10)); - - info!("waiting for RPC server to shut down..."); - rt.shutdown_timeout(Duration::from_secs(3)); - - match test_task_handle.now_or_never() { - Some(Ok(_never)) => unreachable!("test task always panics"), - None => panic!("unexpected test task hang"), - Some(Err(join_error)) => match join_error.try_into_panic() { - Ok(panic_object) => panic::resume_unwind(panic_object), - Err(cancelled_error) => panic!( - "test task should exit with a RPC server panic: \ - unexpected non-panic JoinError: {cancelled_error:?}" - ), - }, - } -} - -/// Check if the RPC server detects a port conflict when running parallel threads. -/// -/// If this test fails, that's great! -/// We can make parallel the default, and remove the warnings in the config docs. -/// -/// This test is sometimes unreliable on Windows, and hangs on macOS. -/// We believe this is a CI infrastructure issue, not a platform-specific issue. -#[test] -#[cfg(not(any(target_os = "windows", target_os = "macos")))] -fn rpc_server_spawn_port_conflict_parallel_auto() { - let _init_guard = zebra_test::init(); - - let port = zebra_test::net::random_known_port(); - let config = Config { - listen_addr: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into()), - indexer_listen_addr: None, - parallel_cpu_threads: 2, - debug_force_finished_sync: false, - cookie_dir: Default::default(), - enable_cookie_auth: false, - }; - - let rt = tokio::runtime::Runtime::new().unwrap(); - - let test_task_handle = rt.spawn(async { - let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut block_verifier_router: MockService<_, _, _, BoxError> = - MockService::build().for_unit_tests(); - - info!("spawning parallel RPC server 1..."); - - let (_rpc_server_1_task_handle, _rpc_tx_queue_1_task_handle, _rpc_server) = - RpcServer::spawn( - config.clone(), - Default::default(), - "RPC server 1 test", - "RPC server 1 test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - tokio::time::sleep(Duration::from_secs(3)).await; - - info!("spawning parallel conflicted RPC server 2..."); - - let (rpc_server_2_task_handle, _rpc_tx_queue_2_task_handle, _rpc_server) = RpcServer::spawn( - config, - Default::default(), - "RPC server 2 conflict test", - "RPC server 2 conflict test", - Buffer::new(mempool.clone(), 1), - Buffer::new(state.clone(), 1), - Buffer::new(block_verifier_router.clone(), 1), - MockSyncStatus::default(), - MockAddressBookPeers::default(), - NoChainTip, - Mainnet, - ); - - info!("spawned RPC servers, checking services..."); - - mempool.expect_no_requests().await; - 
state.expect_no_requests().await; - block_verifier_router.expect_no_requests().await; - - // Because there might be a panic inside a multi-threaded executor, - // we can't depend on the exact behaviour of the other tasks, - // particularly across different machines and OSes. - - // The second server doesn't panic, but we'd like it to. - // (See the function docs for details.) - let rpc_server_2_task_result = rpc_server_2_task_handle.await; - match rpc_server_2_task_result { - Ok(()) => info!( - "Parallel RPC server with conflicting port should exit with an error: \ - but we're ok with it ignoring the conflict for now" - ), - Err(join_error) => match join_error.try_into_panic() { - Ok(panic_object) => panic::resume_unwind(panic_object), - Err(cancelled_error) => info!( - "Parallel RPC server with conflicting port should exit with an error: \ - but we're ok with it ignoring the conflict for now: \ - unexpected JoinError: {cancelled_error:?}" - ), - }, - } - - // Ignore the queue task result - }); - - // Wait until the spawned task finishes - std::thread::sleep(Duration::from_secs(10)); - - info!("waiting for parallel RPC server to shut down..."); - rt.shutdown_timeout(Duration::from_secs(3)); - - match test_task_handle.now_or_never() { - Some(Ok(())) => { - info!("parallel RPC server task successfully exited"); - } - None => panic!("unexpected test task hang"), - Some(Err(join_error)) => match join_error.try_into_panic() { - Ok(panic_object) => panic::resume_unwind(panic_object), - Err(cancelled_error) => info!( - "Parallel RPC server with conflicting port should exit with an error: \ - but we're ok with it ignoring the conflict for now: \ - unexpected JoinError: {cancelled_error:?}" - ), - }, - } + let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); + let mut block_verifier_router: MockService<_, _, _, BoxError> = + MockService::build().for_unit_tests(); + + info!("spawning RPC server 1..."); + + let _rpc_server_1_task_handle = RpcServer::spawn( + config.clone(), + Default::default(), + "RPC server 1 test", + "RPC server 1 test", + Buffer::new(mempool.clone(), 1), + Buffer::new(state.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), + MockSyncStatus::default(), + MockAddressBookPeers::default(), + NoChainTip, + Mainnet, + ) + .await; + + tokio::time::sleep(Duration::from_secs(3)).await; + + info!("spawning conflicted RPC server 2..."); + + let _rpc_server_2_task_handle = RpcServer::spawn( + config, + Default::default(), + "RPC server 2 conflict test", + "RPC server 2 conflict test", + Buffer::new(mempool.clone(), 1), + Buffer::new(state.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), + MockSyncStatus::default(), + MockAddressBookPeers::default(), + NoChainTip, + Mainnet, + ) + .await; + + info!("spawned RPC servers, checking services..."); + + mempool.expect_no_requests().await; + state.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; } diff --git a/zebra-rpc/src/sync.rs b/zebra-rpc/src/sync.rs index 40373d0eaed..c678f580b4a 100644 --- a/zebra-rpc/src/sync.rs +++ b/zebra-rpc/src/sync.rs @@ -382,9 +382,10 @@ impl SyncerRpcMethods for RpcRequestClient { } Err(err) if err - .downcast_ref::() + .downcast_ref::() .is_some_and(|err| { - err.code == server::error::LegacyCode::InvalidParameter.into() + let code: i32 = server::error::LegacyCode::InvalidParameter.into(); + err.code() == code }) => { Ok(None) diff --git 
a/zebrad/Cargo.toml b/zebrad/Cargo.toml index cb3e417d0cf..9cf2d1e4095 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -254,7 +254,7 @@ tonic-build = { version = "0.12.3", optional = true } abscissa_core = { version = "0.7.0", features = ["testing"] } hex = "0.4.3" hex-literal = "0.4.1" -jsonrpc-core = "18.0.0" +jsonrpsee-types = "0.24.7" once_cell = "1.20.2" regex = "1.11.0" insta = { version = "1.41.1", features = ["json"] } diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 2f8a1563b8a..ab06e546fc8 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -243,20 +243,31 @@ impl StartCmd { } // Launch RPC server - info!("spawning RPC server"); - let (rpc_task_handle, rpc_tx_queue_task_handle, rpc_server) = RpcServer::spawn( - config.rpc.clone(), - config.mining.clone(), - build_version(), - user_agent(), - mempool.clone(), - read_only_state_service.clone(), - block_verifier_router.clone(), - sync_status.clone(), - address_book.clone(), - latest_chain_tip.clone(), - config.network.network.clone(), - ); + let (rpc_task_handle, mut rpc_tx_queue_task_handle) = + if let Some(listen_addr) = config.rpc.listen_addr { + info!("spawning RPC server"); + info!("Trying to open RPC endpoint at {}...", listen_addr,); + let rpc_task_handle = RpcServer::spawn( + config.rpc.clone(), + config.mining.clone(), + build_version(), + user_agent(), + mempool.clone(), + read_only_state_service.clone(), + block_verifier_router.clone(), + sync_status.clone(), + address_book.clone(), + latest_chain_tip.clone(), + config.network.network.clone(), + ); + rpc_task_handle.await.unwrap() + } else { + warn!("configure an listen_addr to start the RPC server"); + ( + tokio::spawn(std::future::pending().in_current_span()), + tokio::spawn(std::future::pending().in_current_span()), + ) + }; // TODO: Add a shutdown signal and start the server with `serve_with_incoming_shutdown()` if // any related unit tests sometimes crash with memory errors @@ -399,7 +410,6 @@ impl StartCmd { // ongoing tasks pin!(rpc_task_handle); pin!(indexer_rpc_task_handle); - pin!(rpc_tx_queue_task_handle); pin!(syncer_task_handle); pin!(block_gossip_task_handle); pin!(mempool_crawler_task_handle); @@ -425,17 +435,10 @@ impl StartCmd { let mut exit_when_task_finishes = true; let result = select! { - rpc_result = &mut rpc_task_handle => { - rpc_result + rpc_join_result = &mut rpc_task_handle => { + let rpc_server_result = rpc_join_result .expect("unexpected panic in the rpc task"); - info!("rpc task exited"); - Ok(()) - } - - indexer_rpc_join_result = &mut indexer_rpc_task_handle => { - let indexer_rpc_server_result = indexer_rpc_join_result - .expect("unexpected panic in the rpc task"); - info!(?indexer_rpc_server_result, "indexer rpc task exited"); + info!(?rpc_server_result, "rpc task exited"); Ok(()) } @@ -446,6 +449,13 @@ impl StartCmd { Ok(()) } + indexer_rpc_join_result = &mut indexer_rpc_task_handle => { + let indexer_rpc_server_result = indexer_rpc_join_result + .expect("unexpected panic in the indexer task"); + info!(?indexer_rpc_server_result, "indexer rpc task exited"); + Ok(()) + } + sync_result = &mut syncer_task_handle => sync_result .expect("unexpected panic in the syncer task") .map(|_| info!("syncer task exited")), @@ -536,15 +546,6 @@ impl StartCmd { state_checkpoint_verify_handle.abort(); old_databases_task_handle.abort(); - // Wait until the RPC server shuts down. - // This can take around 150 seconds. 
- // - // Without this shutdown, Zebra's RPC unit tests sometimes crashed with memory errors. - if let Some(rpc_server) = rpc_server { - info!("waiting for RPC server to shut down"); - rpc_server.shutdown_blocking(); - } - info!("exiting Zebra: all tasks have been asked to stop, waiting for remaining tasks to finish"); exit_status diff --git a/zebrad/src/components/miner.rs b/zebrad/src/components/miner.rs index cb32cc91981..ee4960a5d03 100644 --- a/zebrad/src/components/miner.rs +++ b/zebrad/src/components/miner.rs @@ -35,7 +35,7 @@ use zebra_rpc::{ GetBlockTemplateCapability::*, GetBlockTemplateRequestMode::*, }, hex_data::HexData, - GetBlockTemplateRpc, GetBlockTemplateRpcImpl, + GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, }, }; use zebra_state::WatchReceiver; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 3dfc959eb58..ef2de55dc83 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -3270,7 +3270,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { types::submit_block, }, hex_data::HexData, - GetBlockTemplateRpc, GetBlockTemplateRpcImpl, + GetBlockTemplateRpcImpl, GetBlockTemplateRpcServer, }; use zebra_test::mock_service::MockService; let _init_guard = zebra_test::init(); diff --git a/zebrad/tests/common/regtest.rs b/zebrad/tests/common/regtest.rs index acd89d89aba..efd3c08875b 100644 --- a/zebrad/tests/common/regtest.rs +++ b/zebrad/tests/common/regtest.rs @@ -161,9 +161,10 @@ impl MiningRpcMethods for RpcRequestClient { } Err(err) if err - .downcast_ref::() + .downcast_ref::() .is_some_and(|err| { - err.code == server::error::LegacyCode::InvalidParameter.into() + let error: i32 = server::error::LegacyCode::InvalidParameter.into(); + err.code() == error }) => { Ok(None) diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_0_1.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_0_1.snap index 9e830f19e61..d277043f701 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_0_1.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_0_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "orchard", "start_index": 0, @@ -13,6 +14,5 @@ expression: parsed "end_height": 1707429 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_338_1.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_338_1.snap index bcaa36d61fd..c683839781e 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_338_1.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_338_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "orchard", "start_index": 338, @@ -13,6 +14,5 @@ expression: parsed "end_height": 1888929 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_585_1.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_585_1.snap index 945af42ca5f..ec880d7df6f 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_585_1.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_orchard_585_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "orchard", "start_index": 585, @@ -13,6 +14,5 @@ expression: parsed "end_height": 2000126 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_1.snap 
b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_1.snap index 2cf43dd6098..08f8744fadd 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_1.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "sapling", "start_index": 0, @@ -13,6 +14,5 @@ expression: parsed "end_height": 558822 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_11.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_11.snap index d709a53f0c6..f76f202706c 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_11.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_0_11.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "sapling", "start_index": 0, @@ -53,6 +54,5 @@ expression: parsed "end_height": 1363036 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_1090_6.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_1090_6.snap index ad9e68b1620..0274c501497 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_1090_6.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_1090_6.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "sapling", "start_index": 1090, @@ -33,6 +34,5 @@ expression: parsed "end_height": 2056616 } ] - }, - "id": 123 + } } diff --git a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_17_1.snap b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_17_1.snap index 8e0ddc1fb67..d2c983d78a8 100644 --- a/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_17_1.snap +++ b/zebrad/tests/snapshots/z_getsubtreesbyindex_mainnet_sapling_17_1.snap @@ -4,6 +4,7 @@ expression: parsed --- { "jsonrpc": "2.0", + "id": 123, "result": { "pool": "sapling", "start_index": 17, @@ -13,6 +14,5 @@ expression: parsed "end_height": 1703171 } ] - }, - "id": 123 + } }
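// --- Editor's note: illustrative sketch, not part of the diff. ---
// The error-matching pattern used by the updated `sync.rs` and `regtest.rs`
// hunks above, pulled out for clarity: the boxed RPC error is downcast to
// `ErrorObjectOwned` and its numeric code is compared against the legacy code.
// This assumes `zebra_rpc::server::error::LegacyCode` stays publicly reachable
// from test code, as the existing call sites imply.
use jsonrpsee_types::ErrorObjectOwned;
use zebra_rpc::server::error::LegacyCode;

fn is_invalid_parameter_error(err: &(dyn std::error::Error + 'static)) -> bool {
    err.downcast_ref::<ErrorObjectOwned>().is_some_and(|err| {
        let code: i32 = LegacyCode::InvalidParameter.into();
        err.code() == code
    })
}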