diff --git a/.dockerignore b/.dockerignore index 5c69ffc54..62ffd3f04 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,5 +2,6 @@ node_modules /dist/* !/dist/dashboard logs +c2d_storage .env.local .env \ No newline at end of file diff --git a/.gitignore b/.gitignore index 3400eb173..a46225281 100644 --- a/.gitignore +++ b/.gitignore @@ -158,3 +158,4 @@ html-report.html # databases *.sqlite databases/* +c2d_storage/* diff --git a/docs/env.md b/docs/env.md index 9c718177c..445866f73 100644 --- a/docs/env.md +++ b/docs/env.md @@ -84,3 +84,8 @@ Environmental variables are also tracked in `ENVIRONMENT_VARIABLES` within `src/ - `NODE1_PRIVATE_KEY`: Used on test environments, specifically CI, represents the private key for node 1. Example: `"0xfd5c1ccea015b6d663618850824154a3b3fb2882c46cefb05b9a93fea8c3d215"` - `NODE2_PRIVATE_KEY`: Used on test environments, specifically CI, represents the private key for node 2. Example: `"0x1263dc73bef43a9da06149c7e598f52025bf4027f1d6c13896b71e81bb9233fb"` + +## Cron Jobs + +- `CRON_DELETE_DB_LOGS`: Cron expression that schedules deletion of old logs from the database. Example: `0 0 * * *` (runs every day at midnight) +- `CRON_CLEANUP_C2D_STORAGE`: Cron expression that schedules cleanup of expired C2D resources/storage and deletion of old jobs. Example: `*/5 * * * *` (runs every 5 minutes) diff --git a/package-lock.json b/package-lock.json index 94f029570..935a76cb0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -50,6 +50,8 @@ "base58-js": "^2.0.0", "cors": "^2.8.5", "delay": "^5.0.0", + "docker-registry-client": "^3.4.0", + "dockerode": "^4.0.2", "dotenv": "^16.3.1", "eciesjs": "^0.4.5", "eth-crypto": "^2.6.0", @@ -71,6 +73,7 @@ "sinon": "^17.0.1", "sqlite3": "^5.1.7", "stream-concat": "^1.0.0", + "tar": "^7.4.3", "ts-node": "^10.9.1", "tsoa": "^5.1.1", "uint8arrays": "^4.0.6", @@ -82,6 +85,7 @@ "devDependencies": { "@types/chai": "^4.3.10", "@types/cors": "^2.8.17", + "@types/dockerode": "^3.3.31", "@types/express": "^4.17.17", "@types/ip": "^1.1.3", "@types/lzma-native": "^4.0.4", @@ -1315,6 +1319,12 @@ "node": ">=6.9.0" } }, + "node_modules/@balena/dockerignore": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@balena/dockerignore/-/dockerignore-1.0.2.tgz", + "integrity": "sha512-wMue2Sy4GAVTk6Ic4tJVcnfdau+gx2EnG7S+uAEe+TWJFqE4YoWN4/H8MSLj4eYJKxGg26lZwboEniNiNwZQ6Q==", + "license": "Apache-2.0" + }, "node_modules/@bcoe/v8-coverage": { "version": "0.2.3", "license": "MIT" }, @@ -2432,6 +2442,114 @@ "node": ">=18" } }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity":
"sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@isaacs/fs-minipass/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/@istanbuljs/load-nyc-config": { "version": "1.1.0", "license": "ISC", @@ -4117,6 +4235,15 @@ "typescript": "^3 || ^4" } }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "optional": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@pkgr/utils": { "version": "2.4.2", "dev": true, @@ -5318,6 +5445,29 @@ "@types/node": "*" } }, + "node_modules/@types/docker-modem": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/docker-modem/-/docker-modem-3.0.6.tgz", + "integrity": "sha512-yKpAGEuKRSS8wwx0joknWxsmLha78wNMe9R2S3UNsVOkZded8UqOrV8KoeDXoXsjndxwyF3eIhyClGbO1SEhEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "@types/ssh2": "*" + } + }, + 
"node_modules/@types/dockerode": { + "version": "3.3.31", + "resolved": "https://registry.npmjs.org/@types/dockerode/-/dockerode-3.3.31.tgz", + "integrity": "sha512-42R9eoVqJDSvVspV89g7RwRqfNExgievLNWoHkg7NoWIqAmavIbgQBb4oc0qRtHkxE+I3Xxvqv7qVXFABKPBTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/docker-modem": "*", + "@types/node": "*", + "@types/ssh2": "*" + } + }, "node_modules/@types/express": { "version": "4.17.20", "license": "MIT", @@ -5769,6 +5919,26 @@ "version": "8.1.4", "license": "MIT" }, + "node_modules/@types/ssh2": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/@types/ssh2/-/ssh2-1.15.1.tgz", + "integrity": "sha512-ZIbEqKAsi5gj35y4P4vkJYly642wIbY6PqoN0xiyQGshKUGXR9WQjF/iF9mXBQ8uBKy3ezfsCkcoHKhd0BzuDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "^18.11.18" + } + }, + "node_modules/@types/ssh2/node_modules/@types/node": { + "version": "18.19.54", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.54.tgz", + "integrity": "sha512-+BRgt0G5gYjTvdLac9sIeE0iZcJxi4Jc4PV5EUzqi+88jmQLr+fRZdv2tCTV7IHKSGxM6SaLoOXQWWUiLUItMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~5.26.4" + } + }, "node_modules/@types/triple-beam": { "version": "1.3.4", "license": "MIT" @@ -7320,6 +7490,41 @@ "node": ">=0.10.0" } }, + "node_modules/asn1": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "license": "MIT", + "dependencies": { + "safer-buffer": "~2.1.0" + } + }, + "node_modules/asn1.js": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-2.2.1.tgz", + "integrity": "sha512-x1HxYATfjnV+SrtHZR9rxzRvTgZaGAtT/nJB3TPmBxtoEVQVRPArNSzCA+1fVYlHYV/zmMLUJhtZVRcn7WMjfQ==", + "license": "MIT", + "dependencies": { + "bn.js": "^2.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/asn1.js-rfc3280": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/asn1.js-rfc3280/-/asn1.js-rfc3280-2.1.1.tgz", + "integrity": "sha512-/mwtgTbv+xElp8rAw0YPzPoBp4PkV2gl/TRHt9KuK7ZyQXWnTeclQpDJnKZlxCluKr5WAc9tO1NBArLx1egZJQ==", + "license": "MIT", + "peerDependencies": { + "asn1.js": "^2.0.0" + } + }, + "node_modules/asn1.js/node_modules/bn.js": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-2.2.0.tgz", + "integrity": "sha512-nlotTGN6qr+NpeCb8d5mdXR47r6GXiyoX4fEeqBF2u9wp/3XgzIwyftMX9TE+StQRJSOUJtyYr9MVk0rn2ftAg==", + "license": "MIT" + }, "node_modules/asn1js": { "version": "3.0.5", "resolved": "https://registry.npmjs.org/asn1js/-/asn1js-3.0.5.tgz", @@ -7333,6 +7538,14 @@ "node": ">=12.0.0" } }, + "node_modules/assert-plus": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-0.1.5.tgz", + "integrity": "sha512-brU24g7ryhRwGCI2y+1dGQmQXiZF7TtIj583S96y0jjdajIe6wn8BuXyELYhvD22dtIxDQVFk04YTJwwdwOYJw==", + "engines": { + "node": ">=0.8" + } + }, "node_modules/assertion-error": { "version": "1.1.0", "license": "MIT", @@ -7495,6 +7708,18 @@ "version": "0.4.4", "license": "MIT" }, + "node_modules/backoff": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/backoff/-/backoff-2.5.0.tgz", + "integrity": "sha512-wC5ihrnUXmR2douXmXLCe5O3zg3GKIyvRi/hi58a/XyRxVI+3/yM0PYueQOZXPXQ9pxBislYkw+sF9b7C/RuMA==", + "license": "MIT", + "dependencies": { + "precond": "0.2" + }, + "engines": { + "node": ">= 0.6" + } + }, 
"node_modules/balanced-match": { "version": "1.0.2", "license": "MIT" @@ -7531,6 +7756,100 @@ ], "license": "MIT" }, + "node_modules/base64url": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/base64url/-/base64url-1.0.6.tgz", + "integrity": "sha512-YJUNcKuU8Df1LhS3s9OzoYCAOZYHgAUGnDlPgXFCaJZwRzZLcnQ7uM9KRY6EFaJRvzxZqw2w+wCDigwpe+4XUw==", + "license": "MIT", + "dependencies": { + "concat-stream": "~1.4.7", + "meow": "~2.0.0" + }, + "bin": { + "base64url": "bin/base64url" + } + }, + "node_modules/base64url/node_modules/camelcase": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-1.2.1.tgz", + "integrity": "sha512-wzLkDa4K/mzI1OSITC+DUyjgIl/ETNHE9QvYgy6J6Jvqyyz4C0Xfd+lQhb19sX2jMpZV4IssUn0VDVmglV+s4g==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base64url/node_modules/camelcase-keys": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-1.0.0.tgz", + "integrity": "sha512-hwNYKTjJTlDabjJp2xn0h8bRmOpObvXVgYbQmR+Xob/EeBDtYea3xttjr5hqiWqLWtI3/6xO7x1ZAktQ9up+ag==", + "license": "MIT", + "dependencies": { + "camelcase": "^1.0.1", + "map-obj": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base64url/node_modules/get-stdin": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz", + "integrity": "sha512-F5aQMywwJ2n85s4hJPTT9RPxGmubonuB10MNYo17/xph174n2MIR33HRguhzVag10O/npM7SPk73LMZNP+FaWw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base64url/node_modules/indent-string": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-1.2.2.tgz", + "integrity": "sha512-Z1vqf6lDC3f4N2mWqRywY6odjRatPNGDZgUr4DY9MLC14+Fp2/y+CI/RnNGlb8hD6ckscE/8DlZUwHUaiDBshg==", + "license": "MIT", + "dependencies": { + "get-stdin": "^4.0.1", + "minimist": "^1.1.0", + "repeating": "^1.1.0" + }, + "bin": { + "indent-string": "cli.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base64url/node_modules/map-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", + "integrity": "sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base64url/node_modules/meow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-2.0.0.tgz", + "integrity": "sha512-X7rkdgy5Wxxp2MhCiAOkC3lqfkrJkt3iXvW4BY0rYQIn3GMvYvBTsAPEmHHTjTeVzBelrRcQa2F80rYfigz2+A==", + "license": "MIT", + "dependencies": { + "camelcase-keys": "^1.0.0", + "indent-string": "^1.1.0", + "minimist": "^1.1.0", + "object-assign": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base64url/node_modules/object-assign": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-1.0.0.tgz", + "integrity": "sha512-LpUkixU1BUMQ6bwUHbOue4IGGbdRbxi+IEZw7zHniw78erlxrKGHbhfLbHIsI35LGbGqys6QOrjVmLnD2ie+1A==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/basic-ftp": { "version": "5.0.5", "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.0.5.tgz", @@ -7540,6 +7859,15 @@ "node": ">=10.0.0" } }, + "node_modules/bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": 
"sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "license": "BSD-3-Clause", + "dependencies": { + "tweetnacl": "^0.14.3" + } + }, "node_modules/bech32": { "version": "1.1.4", "license": "MIT" @@ -7896,6 +8224,12 @@ "node": "*" } }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", + "license": "BSD-3-Clause" + }, "node_modules/buffer-from": { "version": "1.1.2", "license": "MIT" @@ -7917,6 +8251,15 @@ "node": ">=6.14.2" } }, + "node_modules/buildcheck": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/buildcheck/-/buildcheck-0.0.6.tgz", + "integrity": "sha512-8f9ZJCUXyT1M35Jx7MkBgmBMo3oHTTBIPLiY9xyL0pl3T5RwcPEY8cUHr5LBNfu/fk6c2T4DJZuVM/8ZZT2D2A==", + "optional": true, + "engines": { + "node": ">=10.0.0" + } + }, "node_modules/builtins": { "version": "5.0.1", "license": "MIT", @@ -7938,6 +8281,24 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/bunyan": { + "version": "1.8.15", + "resolved": "https://registry.npmjs.org/bunyan/-/bunyan-1.8.15.tgz", + "integrity": "sha512-0tECWShh6wUysgucJcBAoYegf3JJoZWibxdqhTm7OHPeT42qdjkZ29QCMcKwbgU1kiH+auSIasNRXMLWXafXig==", + "engines": [ + "node >=0.10.0" + ], + "license": "MIT", + "bin": { + "bunyan": "bin/bunyan" + }, + "optionalDependencies": { + "dtrace-provider": "~0.8", + "moment": "^2.19.3", + "mv": "~2", + "safe-json-stringify": "~1" + } + }, "node_modules/bytes": { "version": "3.1.2", "license": "MIT", @@ -8079,6 +8440,32 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/cacache/node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "optional": true, + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cacache/node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "optional": true, + "engines": { + "node": ">=8" + } + }, "node_modules/cacheable-lookup": { "version": "5.0.4", "license": "MIT", @@ -8666,20 +9053,58 @@ "version": "0.0.1", "license": "MIT" }, - "node_modules/concurrently": { - "version": "8.2.2", - "dev": true, + "node_modules/concat-stream": { + "version": "1.4.11", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.4.11.tgz", + "integrity": "sha512-X3JMh8+4je3U1cQpG87+f9lXHDrqcb2MVLg9L7o8b1UZ0DzhRrUpdn65ttzu10PpJPPI3MQNkis+oha6TSA9Mw==", + "engines": [ + "node >= 0.8" + ], "license": "MIT", "dependencies": { - "chalk": "^4.1.2", - "date-fns": "^2.30.0", - "lodash": "^4.17.21", - "rxjs": "^7.8.1", - "shell-quote": "^1.8.1", - "spawn-command": "0.0.2", - "supports-color": "^8.1.1", - "tree-kill": "^1.2.2", - "yargs": "^17.7.2" + "inherits": "~2.0.1", + "readable-stream": "~1.1.9", + "typedarray": "~0.0.5" + } + }, + "node_modules/concat-stream/node_modules/isarray": { + "version": "0.0.1", + "resolved": 
"https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==", + "license": "MIT" + }, + "node_modules/concat-stream/node_modules/readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha512-+MeVjFf4L44XUkhM1eYbD8fyEsxcV81pqMSR5gblfcLCHfZvbrqy4/qYHE+/R5HoBUT11WV5O08Cr1n3YXkWVQ==", + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "node_modules/concat-stream/node_modules/string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==", + "license": "MIT" + }, + "node_modules/concurrently": { + "version": "8.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.2", + "date-fns": "^2.30.0", + "lodash": "^4.17.21", + "rxjs": "^7.8.1", + "shell-quote": "^1.8.1", + "spawn-command": "0.0.2", + "supports-color": "^8.1.1", + "tree-kill": "^1.2.2", + "yargs": "^17.7.2" }, "bin": { "conc": "dist/bin/concurrently.js", @@ -8952,6 +9377,27 @@ "node": ">=8" } }, + "node_modules/cpu-features": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/cpu-features/-/cpu-features-0.0.10.tgz", + "integrity": "sha512-9IkYqtX3YHPCzoVg1Py+o9057a3i0fp7S530UWokCSaFVTc7CwXPRiOjRjBQQ18ZCNafx78YfnG+HALxtVmOGA==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "buildcheck": "~0.0.6", + "nan": "^2.19.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/cpu-features/node_modules/nan": { + "version": "2.20.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.20.0.tgz", + "integrity": "sha512-bk3gXBZDGILuuo/6sKtr0DQmSThYHLtNCdSdXk9YkxD/jK6X2vmCyyXBBxyqZ4XcnzTyYEAThfX3DCEnLf6igw==", + "license": "MIT", + "optional": true + }, "node_modules/cpy": { "version": "9.0.1", "license": "MIT", @@ -10139,6 +10585,90 @@ "node": ">=6" } }, + "node_modules/docker-modem": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-5.0.3.tgz", + "integrity": "sha512-89zhop5YVhcPEt5FpUFGr3cDyceGhq/F9J+ZndQ4KfqNvfbJpPMfgeixFgUj5OjCYAboElqODxY5Z1EBsSa6sg==", + "license": "Apache-2.0", + "dependencies": { + "debug": "^4.1.1", + "readable-stream": "^3.5.0", + "split-ca": "^1.0.1", + "ssh2": "^1.15.0" + }, + "engines": { + "node": ">= 8.0" + } + }, + "node_modules/docker-modem/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/docker-registry-client": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/docker-registry-client/-/docker-registry-client-3.4.0.tgz", + "integrity": "sha512-meDdmo1fa3r4D6WLSPb0CcRBpZs+egIHD8x+fydqAvsTomoUgMbcZDD77bVpBdkhwj+lDZUEWgvMCvAbcDBH4g==", + "license": "MPL-2.0", + "dependencies": { + "assert-plus": "^0.1.5", + "base64url": "1.x >=1.0.4", + "bunyan": "1.x >=1.3.3", + "jwk-to-pem": "1.2.0", + "jws": "3.1.0", + 
"restify-clients": "^1.4.0", + "restify-errors": "^3.0.0", + "strsplit": "1.x", + "tough-cookie": "2.0.x", + "vasync": "1.x >=1.6.1", + "verror": "1.x >=1.6.0", + "www-authenticate": "0.6.x >=0.6.2" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/dockerode": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/dockerode/-/dockerode-4.0.2.tgz", + "integrity": "sha512-9wM1BVpVMFr2Pw3eJNXrYYt6DT9k0xMcsSCjtPvyQ+xa1iPg/Mo3T/gUcwI0B2cczqCeCYRPF8yFYDwtFXT0+w==", + "license": "Apache-2.0", + "dependencies": { + "@balena/dockerignore": "^1.0.2", + "docker-modem": "^5.0.3", + "tar-fs": "~2.0.1" + }, + "engines": { + "node": ">= 8.0" + } + }, + "node_modules/dockerode/node_modules/chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "license": "ISC" + }, + "node_modules/dockerode/node_modules/tar-fs": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.0.1.tgz", + "integrity": "sha512-6tzWDMeroL87uF/+lin46k+Q+46rAJ0SyPGz7OW7wTgblI273hsBqk2C1j0/xNadNLKDTUL9BukSjB7cwgmlPA==", + "license": "MIT", + "dependencies": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", + "pump": "^3.0.0", + "tar-stream": "^2.0.0" + } + }, "node_modules/doctrine": { "version": "3.0.0", "license": "Apache-2.0", @@ -10192,6 +10722,20 @@ "node": ">=0.10" } }, + "node_modules/dtrace-provider": { + "version": "0.8.8", + "resolved": "https://registry.npmjs.org/dtrace-provider/-/dtrace-provider-0.8.8.tgz", + "integrity": "sha512-b7Z7cNtHPhH9EJhNNbbeqTcXB8LGFFZhq1PGgEvpeHlzd36bhbdTWoE/Ba/YguqpBSlAPKnARWhVlhunCMwfxg==", + "hasInstallScript": true, + "license": "BSD-2-Clause", + "optional": true, + "dependencies": { + "nan": "^2.14.0" + }, + "engines": { + "node": ">=0.10" + } + }, "node_modules/duplex-to": { "version": "2.0.0", "license": "MIT" @@ -10250,6 +10794,15 @@ "node": ">=4.0.0" } }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, "node_modules/eciesjs": { "version": "0.4.5", "license": "MIT", @@ -12499,6 +13052,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/extsprintf": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.4.1.tgz", + "integrity": "sha512-Wrk35e8ydCKDj/ArClo1VrPVmN8zph5V4AtHwIuHhvMXsKf73UT3BOD+azBIW+3wOJ4FhEH7zyaJCFvChjYvMA==", + "engines": [ + "node >=0.6.0" + ], + "license": "MIT" + }, "node_modules/eyes": { "version": "0.1.8", "engines": { @@ -12547,6 +13109,12 @@ "version": "2.0.6", "license": "MIT" }, + "node_modules/fast-safe-stringify": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-1.2.3.tgz", + "integrity": "sha512-QJYT/i0QYoiZBQ71ivxdyTqkwKkQ0oxACXHYxH2zYHJEgzi2LsbjgvtzTbLi1SZcF190Db2YP7I7eTsU2egOlw==", + "license": "MIT" + }, "node_modules/fastq": { "version": "1.15.0", "license": "ISC", @@ -14462,6 +15030,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-finite": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.1.0.tgz", + "integrity": 
"sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-fullwidth-code-point": { "version": "1.0.0", "license": "MIT", @@ -15282,6 +15862,20 @@ "set-function-name": "^2.0.1" } }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, "node_modules/java-properties": { "version": "1.0.2", "license": "MIT", @@ -15497,6 +16091,56 @@ "version": "4.2.1", "license": "MIT" }, + "node_modules/jwa": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", + "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jwk-to-pem": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/jwk-to-pem/-/jwk-to-pem-1.2.0.tgz", + "integrity": "sha512-c/RUawllCF0Qfp+oJ2cm8DaeiIP7b2V1xljEY3mibpkRsF3RoL2ELW33D/VESwcrbLluMQYgOVSp9i/OlJa1gg==", + "license": "Apache-2.0", + "dependencies": { + "asn1.js": "^2.2.0", + "asn1.js-rfc3280": "^2.1.0", + "elliptic": "^3.0.4" + } + }, + "node_modules/jwk-to-pem/node_modules/bn.js": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-2.2.0.tgz", + "integrity": "sha512-nlotTGN6qr+NpeCb8d5mdXR47r6GXiyoX4fEeqBF2u9wp/3XgzIwyftMX9TE+StQRJSOUJtyYr9MVk0rn2ftAg==", + "license": "MIT" + }, + "node_modules/jwk-to-pem/node_modules/elliptic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-3.1.0.tgz", + "integrity": "sha512-kjzyQvz5tdIrz+O8EAaDU5oeICcg5mMevSFEEi/cprAl1GID1BoV/1tpRu56rDJ6tiXM2b+ZKh3mNrVhA3Y/2Q==", + "license": "MIT", + "dependencies": { + "bn.js": "^2.0.3", + "brorand": "^1.0.1", + "hash.js": "^1.0.0", + "inherits": "^2.0.1" + } + }, + "node_modules/jws": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jws/-/jws-3.1.0.tgz", + "integrity": "sha512-dIhjVxxfhs93IKornyqxfkx/H/fupqwrUzXAXu/zMkgnPyGH0qXKVtet0Fu7I7o0BlV3SDUkAKOCHpzPItPOoQ==", + "license": "MIT", + "dependencies": { + "base64url": "~1.0.4", + "jwa": "^1.1.0" + } + }, "node_modules/keccak": { "version": "3.0.4", "hasInstallScript": true, @@ -15522,6 +16166,12 @@ "node": ">= 6" } }, + "node_modules/keep-alive-agent": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/keep-alive-agent/-/keep-alive-agent-0.0.1.tgz", + "integrity": "sha512-fF6aj9/XFwJiE/4zihw/ZdXg+KeyU4nFvmutF+PkAVadSGqP298+Zm6IzWFzgeDBgvLk3o8boBxNtd1g5Kdjfg==", + "license": "MIT" + }, "node_modules/keyv": { "version": "4.5.4", "license": "MIT", @@ -17803,6 +18453,66 @@ "version": "0.0.8", "license": "ISC" }, + "node_modules/mv": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/mv/-/mv-2.1.1.tgz", + "integrity": "sha512-at/ZndSy3xEGJ8i0ygALh8ru9qy7gWW1cmkaqBN29JmMlIvM//MEO9y1sk/avxuwnPcfhkejkLsuPxH81BrkSg==", + "license": "MIT", + "optional": true, + "dependencies": { + "mkdirp": "~0.5.1", + "ncp": "~2.0.0", + "rimraf": 
"~2.4.0" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/mv/node_modules/glob": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/glob/-/glob-6.0.4.tgz", + "integrity": "sha512-MKZeRNyYZAVVVG1oZeLaWie1uweH40m9AZwIwxyPbTSX4hHrVYSzLg0Ro5Z5R7XKkIX+Cc6oD1rqeDJnwsB8/A==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "license": "ISC", + "optional": true, + "dependencies": { + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "2 || 3", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + } + }, + "node_modules/mv/node_modules/mkdirp": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", + "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "license": "MIT", + "optional": true, + "dependencies": { + "minimist": "^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/mv/node_modules/rimraf": { + "version": "2.4.5", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.4.5.tgz", + "integrity": "sha512-J5xnxTyqaiw06JjMftq7L9ouA448dw/E7dKghkP9WpKNuwmARNNg+Gk8/u5ryb9N/Yo2+z3MCwuqFK/+qPOPfQ==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "license": "ISC", + "optional": true, + "dependencies": { + "glob": "^6.0.1" + }, + "bin": { + "rimraf": "bin.js" + } + }, "node_modules/n3": { "version": "1.17.2", "license": "MIT", @@ -17875,6 +18585,16 @@ "version": "1.4.0", "license": "MIT" }, + "node_modules/ncp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ncp/-/ncp-2.0.0.tgz", + "integrity": "sha512-zIdGUrPRFTUELUvr3Gmc7KZ2Sw/h1PiVM0Af/oHB6zgnV1ikqSfRk+TOufi79aHYCW3NiOXmr1BP5nWbzojLaA==", + "license": "MIT", + "optional": true, + "bin": { + "ncp": "bin/ncp" + } + }, "node_modules/negotiator": { "version": "0.6.3", "license": "MIT", @@ -18089,6 +18809,32 @@ "node": ">=6" } }, + "node_modules/node-gyp/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "optional": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/node-gyp/node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "optional": true, + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/node-preload": { "version": "0.2.1", "license": "MIT", @@ -21661,6 +22407,11 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==" + }, "node_modules/package-json/node_modules/@sindresorhus/is": { "version": "5.6.0", "license": "MIT", @@ -22030,6 +22781,34 @@ "version": "1.0.7", "license": "MIT" }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": 
"sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==" + }, + "node_modules/path-scurry/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/path-to-regexp": { "version": "0.1.10", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", @@ -22662,6 +23441,14 @@ "node": ">=6.0" } }, + "node_modules/precond": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/precond/-/precond-0.2.3.tgz", + "integrity": "sha512-QCYG84SgGyGzqJ/vlMsxeXd/pgL/I94ixdNFyh1PusWmTCyVfPJjZ1K1jvHtsbfnXQs2TSkEP2fR7QiMZAnKFQ==", + "engines": { + "node": ">= 0.6" + } + }, "node_modules/prelude-ls": { "version": "1.2.1", "license": "MIT", @@ -25027,6 +25814,21 @@ "node": ">=4" } }, + "node_modules/repeating": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/repeating/-/repeating-1.1.3.tgz", + "integrity": "sha512-Nh30JLeMHdoI+AsQ5eblhZ7YlTsM9wiJQe/AHIunlK3KWzvXhXb36IJ7K1IOeRjIOtzMjdUHjwXUFxKJoPTSOg==", + "license": "MIT", + "dependencies": { + "is-finite": "^1.0.0" + }, + "bin": { + "repeating": "cli.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/require-directory": { "version": "2.1.1", "license": "MIT", @@ -25096,6 +25898,112 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/restify-clients": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/restify-clients/-/restify-clients-1.6.0.tgz", + "integrity": "sha512-q5kF/KHkwC10PhEjZkgQnWCIVCq5rlKF+fbqjl51e28ArkztJNI5czFzwCd/4Qz3HRrfwidk1XcAKLxY75dT6w==", + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "backoff": "^2.4.1", + "bunyan": "^1.8.3", + "fast-safe-stringify": "^1.1.3", + "keep-alive-agent": "0.0.1", + "lodash": "^4.7.0", + "lru-cache": "^4.0.1", + "mime": "^1.3.4", + "once": "^1.3.2", + "restify-errors": "^3.1.0", + "semver": "^5.0.1", + "tunnel-agent": "^0.6.0", + "uuid": "^3.0.1" + }, + "optionalDependencies": { + "dtrace-provider": "^0.8.3" + } + }, + "node_modules/restify-clients/node_modules/assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/restify-clients/node_modules/lru-cache": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", + "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", + "license": "ISC", + "dependencies": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "node_modules/restify-clients/node_modules/mime": { + "version": "1.6.0", + "resolved": 
"https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/restify-clients/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/restify-clients/node_modules/uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.", + "license": "MIT", + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/restify-clients/node_modules/yallist": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", + "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==", + "license": "ISC" + }, + "node_modules/restify-errors": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restify-errors/-/restify-errors-3.1.0.tgz", + "integrity": "sha512-4RDQs4zirMPXH03y5LKIFoAs+LvO9HTd5Ig4KfD5h4yRtTC5aWK/F2L1g9O2CSjTsgNIc+d0ib0f1rSob3FjNg==", + "license": "MIT", + "dependencies": { + "assert-plus": "^0.2.0", + "lodash": "^3.10.1", + "verror": "^1.6.0" + } + }, + "node_modules/restify-errors/node_modules/assert-plus": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-0.2.0.tgz", + "integrity": "sha512-u1L0ZLywRziOVjUhRxI0Qg9G+4RnFB9H/Rq40YWn0dieDgO7vAYeJz6jKAO6t/aruzlDFLAPkQTT87e+f8Imaw==", + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/restify-errors/node_modules/lodash": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-3.10.1.tgz", + "integrity": "sha512-9mDDwqVIma6OZX79ZlDACZl8sBm0TEnkf99zV3iMA4GzkIT/9hiqP5mY0HoT1iNLCrKc/R1HByV+yJfRWVJryQ==", + "license": "MIT" + }, "node_modules/restore-cursor": { "version": "2.0.0", "license": "MIT", @@ -25379,6 +26287,13 @@ ], "license": "MIT" }, + "node_modules/safe-json-stringify": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/safe-json-stringify/-/safe-json-stringify-1.2.0.tgz", + "integrity": "sha512-gH8eh2nZudPQO6TytOvbxnuhYBOvDBBLW52tz5q6X58lJcd/tkmqFR+5Z9adS8aJtURSXWThWy/xJtJwixErvg==", + "license": "MIT", + "optional": true + }, "node_modules/safe-regex": { "version": "2.1.1", "dev": true, @@ -26722,6 +27637,12 @@ "node": "*" } }, + "node_modules/split-ca": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/split-ca/-/split-ca-1.0.1.tgz", + "integrity": "sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ==", + "license": "ISC" + }, "node_modules/split2": { "version": "3.2.2", "license": "ISC", @@ -26769,11 +27690,59 @@ } } }, + "node_modules/sqlite3/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": 
"sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "engines": { + "node": ">=8" + } + }, "node_modules/sqlite3/node_modules/node-addon-api": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==" }, + "node_modules/sqlite3/node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ssh2": { + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.16.0.tgz", + "integrity": "sha512-r1X4KsBGedJqo7h8F5c4Ybpcr5RjyP+aWIG007uBPRjmdQWfEiVLzSK71Zji1B9sKxwaCvD8y8cwSkYrlLiRRg==", + "hasInstallScript": true, + "dependencies": { + "asn1": "^0.2.6", + "bcrypt-pbkdf": "^1.0.2" + }, + "engines": { + "node": ">=10.16.0" + }, + "optionalDependencies": { + "cpu-features": "~0.0.10", + "nan": "^2.20.0" + } + }, + "node_modules/ssh2/node_modules/nan": { + "version": "2.20.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.20.0.tgz", + "integrity": "sha512-bk3gXBZDGILuuo/6sKtr0DQmSThYHLtNCdSdXk9YkxD/jK6X2vmCyyXBBxyqZ4XcnzTyYEAThfX3DCEnLf6igw==", + "license": "MIT", + "optional": true + }, "node_modules/ssri": { "version": "8.0.1", "resolved": "https://registry.npmjs.org/ssri/-/ssri-8.0.1.tgz", @@ -27085,6 +28054,28 @@ "node": ">=0.10.0" } }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, "node_modules/string-width/node_modules/ansi-regex": { "version": "2.1.1", "license": "MIT", @@ -27169,6 +28160,18 @@ "node": ">=8" } }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/strip-bom": { "version": "5.0.0", "license": "MIT", @@ -27233,6 +28236,12 @@ "node": ">=0.8.0" } }, + "node_modules/strsplit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strsplit/-/strsplit-1.0.0.tgz", + "integrity": "sha512-efXqQImOEC0nyQqFzPUqa7NvF4B0ZPW2YM5nS+uXTB76sQt002brfZWQo/NSkAt771RTvv/brVQqtxJL7UBHMw==", + "license": "MIT" + }, "node_modules/sumchecker": { "version": "3.0.1", "license": "Apache-2.0", @@ -27458,19 +28467,19 @@ } }, 
"node_modules/tar": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", - "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", + "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" }, "engines": { - "node": ">=10" + "node": ">=18" } }, "node_modules/tar-fs": { @@ -27550,12 +28559,135 @@ "node": ">= 6" } }, + "node_modules/tar/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/tar/node_modules/chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "engines": { + "node": ">=18" + } + }, + "node_modules/tar/node_modules/foreground-child": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", + "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/tar/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/tar/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/tar/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/tar/node_modules/minizlib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.1.tgz", + "integrity": "sha512-umcy022ILvb5/3Djuu8LWeqUa8D68JaBzlttKeMWen48SjabqS3iY5w/vzeMzMUNhLDifyhbOwKDSznB1vvrwg==", + "dependencies": { + "minipass": "^7.0.4", + 
"rimraf": "^5.0.5" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/tar/node_modules/mkdirp": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", + "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", + "bin": { + "mkdirp": "dist/cjs/src/bin.js" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/tar/node_modules/rimraf": { + "version": "5.0.10", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-5.0.10.tgz", + "integrity": "sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==", + "dependencies": { + "glob": "^10.3.7" + }, + "bin": { + "rimraf": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/tar/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/tar/node_modules/yallist": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", "engines": { - "node": ">=8" + "node": ">=18" } }, "node_modules/temp-dir": { @@ -27721,6 +28853,16 @@ "node": ">=6" } }, + "node_modules/tough-cookie": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.0.0.tgz", + "integrity": "sha512-qYeH1zA+4+36nVi2waxBoFcbL54iInWYs6NuMQztwijcfhPZqeCm/fjRkDrnEtkYzOIh19SkKrjs5A+VDx+5sA==", + "deprecated": "ReDoS vulnerability parsing Set-Cookie https://nodesecurity.io/advisories/130", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/tr46": { "version": "0.0.3", "license": "MIT" @@ -27989,6 +29131,12 @@ "node": "*" } }, + "node_modules/tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==", + "license": "Unlicense" + }, "node_modules/type": { "version": "1.2.0", "license": "ISC" @@ -28090,6 +29238,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/typedarray": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.7.tgz", + "integrity": "sha512-ueeb9YybpjhivjbHP2LdFDAjbS948fGEPj+ACAMs4xCMmh72OCOMQWBQKlaN4ZNQ04yfLSDLSx1tGRIoWimObQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/typedarray-to-buffer": { "version": "3.1.5", "license": "MIT", @@ -28647,6 +29804,68 @@ "node": ">= 0.8" } }, + "node_modules/vasync": { + "version": "1.6.4", + "resolved": "https://registry.npmjs.org/vasync/-/vasync-1.6.4.tgz", + "integrity": "sha512-3oQMomVgQgHzNe5iKuT8PGOhMCQcg1wfh00Nh/Kl39ERdTlw/uNS7kbrhEraDMDKWHdDdc0iBFahPEd/Ft2b+A==", + "engines": [ + "node >=0.6.0" + ], + "license": "MIT", + "dependencies": { + "verror": "1.6.0" + } + }, + 
"node_modules/vasync/node_modules/extsprintf": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.2.0.tgz", + "integrity": "sha512-T3PYC6HucmF4OfunfZb5d1nRvTSvWYhsr/Og33HANcCuCtGPUtWVyt/tTs8SU9sR0SGh5Z/xQCuX/D72ph2H+A==", + "engines": [ + "node >=0.6.0" + ], + "license": "MIT" + }, + "node_modules/vasync/node_modules/verror": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.6.0.tgz", + "integrity": "sha512-bIOaZx4+Bf6a7sIORfmYnyKLDLk/lhVym6rjYlq+vkitYKnhFmUpmPpDTCltWFrUTlGKs6sCeoDWfMA0oOOneA==", + "engines": [ + "node >=0.6.0" + ], + "license": "MIT", + "dependencies": { + "extsprintf": "1.2.0" + } + }, + "node_modules/verror": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.1.tgz", + "integrity": "sha512-veufcmxri4e3XSrT0xwfUR7kguIkaxBeosDg00yDWhk49wdwkSUrvvsm7nc75e1PUyvIeZj6nS8VQRYz2/S4Xg==", + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/verror/node_modules/assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/verror/node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==", + "license": "MIT" + }, "node_modules/version-guard": { "version": "1.1.1", "license": "0BSD", @@ -29074,6 +30293,44 @@ "node": ">=4" } }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/wrap-ansi/node_modules/ansi-regex": { "version": "3.0.1", "license": "MIT", @@ -29143,6 +30400,14 @@ } } }, + "node_modules/www-authenticate": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/www-authenticate/-/www-authenticate-0.6.3.tgz", + "integrity": "sha512-8VkdLBJiBh5aXlJvcVaPykwSI//OA+Sxw7g84vIyCqoqlXtLupGNhyXxbgVuZ7g5ZS+lCJ4bTtcw/gJciqEuAg==", + "engines": { + "node": ">= 0.8.0" + } + }, 
"node_modules/xdg-basedir": { "version": "5.1.0", "license": "MIT", diff --git a/package.json b/package.json index 43dd377a4..cb2802fda 100644 --- a/package.json +++ b/package.json @@ -89,6 +89,8 @@ "base58-js": "^2.0.0", "cors": "^2.8.5", "delay": "^5.0.0", + "docker-registry-client": "^3.4.0", + "dockerode": "^4.0.2", "dotenv": "^16.3.1", "eciesjs": "^0.4.5", "eth-crypto": "^2.6.0", @@ -110,6 +112,7 @@ "sinon": "^17.0.1", "sqlite3": "^5.1.7", "stream-concat": "^1.0.0", + "tar": "^7.4.3", "ts-node": "^10.9.1", "tsoa": "^5.1.1", "uint8arrays": "^4.0.6", @@ -121,6 +124,7 @@ "devDependencies": { "@types/chai": "^4.3.10", "@types/cors": "^2.8.17", + "@types/dockerode": "^3.3.31", "@types/express": "^4.17.17", "@types/ip": "^1.1.3", "@types/lzma-native": "^4.0.4", diff --git a/scripts/ocean-node-quickstart.sh b/scripts/ocean-node-quickstart.sh index 4c3085338..4413f3e04 100755 --- a/scripts/ocean-node-quickstart.sh +++ b/scripts/ocean-node-quickstart.sh @@ -121,6 +121,43 @@ else echo "No input provided, the Ocean Node might not be accessible from other nodes." fi +read -p "Do you want to run docker C2D jobs on your Ocean Node [ y/n ]: " run_c2d_jobs + +if [ "$run_c2d_jobs" == "y" ]; then + read -p "Enter the docker socket path: " DOCKER_SOCKET_PATH + DOCKER_SOCKET_PATH=${DOCKER_SOCKET_PATH:-''} + read -p "Enter the docker protocol: " DOCKER_PROTOCOL + DOCKER_PROTOCOL=${DOCKER_PROTOCOL:-''} + read -p "Enter the docker host: " DOCKER_HOST + DOCKER_HOST=${DOCKER_HOST:-''} + read -p "Enter the docker port: " DOCKER_PORT + DOCKER_PORT=${DOCKER_PORT:-0} + read -p "Enter the docker certificate authority path: " DOCKER_CA_PATH + DOCKER_CA_PATH=${DOCKER_CA_PATH:-''} + read -p "Enter the docker certificate path: " DOCKER_CERT_PATH + DOCKER_CERT_PATH=${DOCKER_CERT_PATH:-''} + read -p "Enter the docker key path: " DOCKER_KEY_PATH + DOCKER_KEY_PATH=${DOCKER_KEY_PATH:-''} + echo "" + echo "########################################################" + echo "### Docker Engine Compute Environments Configuration ###" + echo "########################################################" + echo "Check 'ComputeEnvironment' definition for more details on the format" + echo "_____________________________________________________" + echo "" + read -p "Do you want to add a specific docker environment (payed) configuration? + (Hint: You can enter multiple in JSON format) [ y/n ]: " c2d_payed_env + if [ "$c2d_payed_env" == "y" ]; then + read -p "Enter the array of payed docker environment(s): " DOCKER_COMPUTE_ENVIRONMENTS + fi + read -p "Do you want to add a specific docker environment (free) configuration? + (Hint: You can enter only one in JSON format) [ y/n ]: " c2d_free_env + if [ "$c2d_free_env" == "y" ]; then + read -p "Enter the settings of the free docker environment: " DOCKER_FREE_COMPUTE + fi +else + echo "Running node without docker C2D capabilities!" 
cat <<EOF > docker-compose.yml services: @@ -181,6 +218,15 @@ services: # P2P_ENABLE_CIRCUIT_RELAY_CLIENT: '' # P2P_BOOTSTRAP_NODES: '' # P2P_FILTER_ANNOUNCED_ADDRESSES: '' + DOCKER_SOCKET_PATH: '$DOCKER_SOCKET_PATH' + DOCKER_PROTOCOL: '$DOCKER_PROTOCOL' + DOCKER_HOST: '$DOCKER_HOST' + DOCKER_PORT: '$DOCKER_PORT' + DOCKER_CA_PATH: '$DOCKER_CA_PATH' + DOCKER_CERT_PATH: '$DOCKER_CERT_PATH' + DOCKER_KEY_PATH: '$DOCKER_KEY_PATH' + DOCKER_COMPUTE_ENVIRONMENTS: '$DOCKER_COMPUTE_ENVIRONMENTS' + DOCKER_FREE_COMPUTE: '$DOCKER_FREE_COMPUTE' networks: - ocean_network depends_on: diff --git a/src/@types/C2D.ts b/src/@types/C2D.ts deleted file mode 100644 index f1297ca22..000000000 --- a/src/@types/C2D.ts +++ /dev/null @@ -1,199 +0,0 @@ -import type { MetadataAlgorithm } from './DDO/Metadata.js' - -export enum C2DClusterType { - // eslint-disable-next-line no-unused-vars - OPF_K8 = 0, - // eslint-disable-next-line no-unused-vars - NODE_LOCAL = 1 -} - -export interface C2DClusterInfo { - /** Type of cluster: K8, Node local, etc */ - type: C2DClusterType - /** Hash of cluster. hash(url) for remote, hash(nodeId) for local */ - hash: string - /** Connection URI */ - connection?: string -} - -export interface ComputeEnvironment { - id: string - cpuNumber: number - cpuType: string - gpuNumber: number - gpuType: string - ramGB: number - diskGB: number - priceMin: number - desc: string - currentJobs: number - maxJobs: number - consumerAddress: string - storageExpiry: number - maxJobDuration: number - lastSeen: number - chainId?: number - feeToken: string -} - -export interface ComputeEnvByChain { - [chainId: number]: ComputeEnvironment[] -} - -export type ComputeResultType = - | 'algorithmLog' - | 'output' - | 'configrationLog' - | 'publishLog' - -export interface ComputeResult { - filename: string - filesize: number - type: ComputeResultType - index?: number -} - -export interface ComputeJob { - owner: string - did?: string - jobId: string - dateCreated: string - dateFinished: string - status: number - statusText: string - results: ComputeResult[] - inputDID?: string[] - algoDID?: string - agreementId?: string - expireTimestamp: number -} - -export interface ComputeOutput { - publishAlgorithmLog?: boolean - publishOutput?: boolean - providerAddress?: string - providerUri?: string - metadataUri?: string - nodeUri?: string - owner?: string - secretStoreUri?: string - whitelist?: string[] -} - -export interface ComputeAsset { - url?: string - documentId: string - serviceId: string - transferTxId?: string - userdata?: { [key: string]: any } -} - -export interface ComputeAlgorithm { - documentId?: string - serviceId?: string - url?: string - meta?: MetadataAlgorithm - transferTxId?: string - algocustomdata?: { [key: string]: any } - userdata?: { [key: string]: any } -} - -/* The following are specific to OPF_k8 compute engine */ -export interface OPFK8ComputeStageInput { - index: number - id?: string - remote?: any - url?: string[] -} -export interface OPFK8ComputeStageAlgorithm { - id?: string - url?: string - remote?: any - rawcode?: string - container?: { - /** - * The command to execute, or script to run inside the Docker image. - * @type {string} - */ - entrypoint: string - - /** - * Name of the Docker image. - * @type {string} - */ - image: string - - /** - * Tag of the Docker image. - * @type {string} - */ - tag: string - } -} - -export interface OPFK8ComputeOutput { - // this is a copy of ComputeOutput, but they could diverge in the future - publishAlgorithmLog?: boolean - publishOutput?: boolean - providerAddress?: string - providerUri?: string - metadataUri?: string - nodeUri?: string - owner?: string - secretStoreUri?: string - whitelist?: string[] -} -export interface OPFK8ComputeStage { - index: number - input: OPFK8ComputeStageInput[] - algorithm: OPFK8ComputeStageAlgorithm - compute?: {} - output: OPFK8ComputeOutput -} - -export interface OPFK8ComputeWorkflow { - stages: OPFK8ComputeStage[] -} -export interface OPFK8ComputeStart { - workflow: OPFK8ComputeWorkflow - owner: string - agreementId: string - providerSignature: string - providerAddress: string - environment: string - validUntil: number - nonce: number - chainId: number -} - -export interface OPFK8ComputeStop { - jobId: string - owner: string - agreementId?: string - providerSignature: string // message=owner+jobId - providerAddress: string - nonce: number -} - -export interface OPFK8ComputeGetStatus { - agreementId?: string - jobId?: string - owner?: string - providerSignature: string // message=owner+jobId(if any) - providerAddress: string - nonce: number -} - -export interface OPFK8ComputeGetResult { - jobId: string - owner: string - index: number - providerSignature: string // message=owner+jobId - providerAddress: string - nonce: number -} - -export interface AlgoChecksums { - files: string - container: string -}
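The DOCKER_* variables that the quickstart writes into docker-compose.yml above correspond to the new `C2DDockerConfig` interface defined in `src/@types/C2D/C2D.ts`, added below as the replacement for the deleted `src/@types/C2D.ts`. A rough sketch of that mapping follows; the helper name and the defaulting logic are assumptions for illustration, not the node's actual configuration loader.

import type { C2DDockerConfig } from './src/@types/C2D/C2D.js'

// Hypothetical mapping from the compose environment variables to
// C2DDockerConfig; it only illustrates which field each variable feeds.
function dockerConfigFromEnv(env: NodeJS.ProcessEnv = process.env): C2DDockerConfig {
  return {
    socketPath: env.DOCKER_SOCKET_PATH ?? '/var/run/docker.sock', // usual local default
    protocol: env.DOCKER_PROTOCOL ?? '',
    host: env.DOCKER_HOST ?? '',
    port: Number(env.DOCKER_PORT ?? 0),
    caPath: env.DOCKER_CA_PATH ?? '',
    certPath: env.DOCKER_CERT_PATH ?? '',
    keyPath: env.DOCKER_KEY_PATH ?? '',
    // both values arrive as JSON strings from the quickstart prompts
    environments: JSON.parse(env.DOCKER_COMPUTE_ENVIRONMENTS ?? '[]'),
    freeComputeOptions: env.DOCKER_FREE_COMPUTE
      ? JSON.parse(env.DOCKER_FREE_COMPUTE)
      : undefined
  }
}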
- * @type {string} - */ - tag: string - } -} - -export interface OPFK8ComputeOutput { - // this is a copy of ComputeOutput, but they could diverge in the future - publishAlgorithmLog?: boolean - publishOutput?: boolean - providerAddress?: string - providerUri?: string - metadataUri?: string - nodeUri?: string - owner?: string - secretStoreUri?: string - whitelist?: string[] -} -export interface OPFK8ComputeStage { - index: number - input: OPFK8ComputeStageInput[] - algorithm: OPFK8ComputeStageAlgorithm - compute?: {} - output: OPFK8ComputeOutput -} - -export interface OPFK8ComputeWorkflow { - stages: OPFK8ComputeStage[] -} -export interface OPFK8ComputeStart { - workflow: OPFK8ComputeWorkflow - owner: string - agreementId: string - providerSignature: string - providerAddress: string - environment: string - validUntil: number - nonce: number - chainId: number -} - -export interface OPFK8ComputeStop { - jobId: string - owner: string - agreementId?: string - providerSignature: string // message=owner+jobId - providerAddress: string - nonce: number -} - -export interface OPFK8ComputeGetStatus { - agreementId?: string - jobId?: string - owner?: string - providerSignature: string // message=owner+jobId(if any) - providerAddress: string - nonce: number -} - -export interface OPFK8ComputeGetResult { - jobId: string - owner: string - index: number - providerSignature: string // message=owner+jobId - providerAddress: string - nonce: number -} - -export interface AlgoChecksums { - files: string - container: string -} diff --git a/src/@types/C2D/C2D.ts b/src/@types/C2D/C2D.ts new file mode 100644 index 000000000..fb3ba5957 --- /dev/null +++ b/src/@types/C2D/C2D.ts @@ -0,0 +1,216 @@ +import type { MetadataAlgorithm } from '../DDO/Metadata.js' +import type { BaseFileObject } from '../fileObject.js' +export enum C2DClusterType { + // eslint-disable-next-line no-unused-vars + OPF_K8 = 0, + // eslint-disable-next-line no-unused-vars + NODE_LOCAL = 1, + // eslint-disable-next-line no-unused-vars + DOCKER = 2 +} + +export interface C2DClusterInfo { + /** Type of cluster: K8, Node local, etc */ + type: C2DClusterType + /** Hash of cluster. 
hash(url) for remote, hash(nodeId) for local */ + hash: string + /** Connection URI */ + connection?: any + /** Folder for storing data */ + tempFolder?: string +} + +export interface ComputeEnvironmentBaseConfig { + cpuNumber: number + ramGB: number + diskGB: number + desc: string + maxJobs: number + storageExpiry: number + maxJobDuration: number + chainId?: number + feeToken: string + priceMin: number +} + +export interface RunningPlatform { + architecture: string + os: string +} +export interface ComputeEnvironment extends ComputeEnvironmentBaseConfig { + id: string + + cpuType?: string + gpuNumber?: number + gpuType?: string + currentJobs: number + consumerAddress: string + lastSeen?: number + free: boolean + platform?: RunningPlatform[] // array due to k8 support +} + +export interface C2DDockerConfig { + socketPath: string + protocol: string + host: string + port: number + caPath: string + certPath: string + keyPath: string + environments: ComputeEnvironment[] + freeComputeOptions?: ComputeEnvironment +} + +export interface ComputeEnvByChain { + [chainId: number]: ComputeEnvironment[] +} + +export type ComputeResultType = + | 'algorithmLog' + | 'output' + | 'configrationLog' + | 'publishLog' + +export interface ComputeResult { + filename: string + filesize: number + type: ComputeResultType + index?: number +} + +export interface ComputeJob { + owner: string + did?: string + jobId: string + dateCreated: string + dateFinished: string + status: number + statusText: string + results: ComputeResult[] + inputDID?: string[] + algoDID?: string + agreementId?: string + expireTimestamp: number + environment?: string +} + +export interface ComputeOutput { + publishAlgorithmLog?: boolean + publishOutput?: boolean + providerAddress?: string + providerUri?: string + metadataUri?: string + nodeUri?: string + owner?: string + secretStoreUri?: string + whitelist?: string[] +} + +export interface ComputeAsset { + fileObject?: BaseFileObject + documentId?: string + serviceId?: string + transferTxId?: string + userdata?: { [key: string]: any } +} + +export interface ComputeAlgorithm { + documentId?: string + serviceId?: string + fileObject?: BaseFileObject + meta?: MetadataAlgorithm + transferTxId?: string + algocustomdata?: { [key: string]: any } + userdata?: { [key: string]: any } +} + +export interface AlgoChecksums { + files: string + container: string +} + +// this is the internal structure +export interface DBComputeJob extends ComputeJob { + clusterHash: string + configlogURL: string + publishlogURL: string + algologURL: string + outputsURL: string + stopRequested: boolean + algorithm: ComputeAlgorithm + assets: ComputeAsset[] + isRunning: boolean + isStarted: boolean + containerImage: string +} + +// make sure we keep them both in sync (C2DStatusNumber and C2DStatusText) +export enum C2DStatusNumber { + // eslint-disable-next-line no-unused-vars + JobStarted = 0, + // eslint-disable-next-line no-unused-vars + PullImage = 10, + // eslint-disable-next-line no-unused-vars + ConfiguringVolumes = 20, + // eslint-disable-next-line no-unused-vars + VolumeCreationFailed = 21, + // eslint-disable-next-line no-unused-vars + ContainerCreationFailed = 22, + // eslint-disable-next-line no-unused-vars + Provisioning = 30, + // eslint-disable-next-line no-unused-vars + DataProvisioningFailed = 31, + // eslint-disable-next-line no-unused-vars + AlgorithmProvisioningFailed = 32, + // eslint-disable-next-line no-unused-vars + DataUploadFailed = 33, + // eslint-disable-next-line no-unused-vars + RunningAlgorithm = 40, + // eslint-disable-next-line
no-unused-vars + AlgorithmFailed = 41, + // eslint-disable-next-line no-unused-vars + FilteringResults = 50, + // eslint-disable-next-line no-unused-vars + PublishingResults = 60, + // eslint-disable-next-line no-unused-vars + ResultsFetchFailed = 61, + // eslint-disable-next-line no-unused-vars + ResultsUploadFailed = 62, + // eslint-disable-next-line no-unused-vars + JobFinished = 70 +} +export enum C2DStatusText { + // eslint-disable-next-line no-unused-vars + JobStarted = 'Job started', + // eslint-disable-next-line no-unused-vars + PullImage = 'Pulling algorithm image', + // eslint-disable-next-line no-unused-vars + ConfiguringVolumes = 'Configuring volumes', + // eslint-disable-next-line no-unused-vars + VolumeCreationFailed = 'Volume creation failed', + // eslint-disable-next-line no-unused-vars + ContainerCreationFailed = 'Container creation failed', + // eslint-disable-next-line no-unused-vars + Provisioning = 'Provisioning data', + // eslint-disable-next-line no-unused-vars + DataProvisioningFailed = 'Data provisioning failed', + // eslint-disable-next-line no-unused-vars + AlgorithmProvisioningFailed = 'Algorithm provisioning failed', + // eslint-disable-next-line no-unused-vars + DataUploadFailed = 'Data upload to container failed', + // eslint-disable-next-line no-unused-vars + RunningAlgorithm = 'Running algorithm ', + // eslint-disable-next-line no-unused-vars + AlgorithmFailed = 'Failed to run algorithm', + // eslint-disable-next-line no-unused-vars + FilteringResults = 'Filtering results', + // eslint-disable-next-line no-unused-vars + PublishingResults = 'Publishing results', + // eslint-disable-next-line no-unused-vars + ResultsFetchFailed = 'Failed to get outputs folder from container', + // eslint-disable-next-line no-unused-vars + ResultsUploadFailed = 'Failed to upload results to storage', + // eslint-disable-next-line no-unused-vars + JobFinished = 'Job finished' +} diff --git a/src/@types/C2D/C2D_OPFK8.ts b/src/@types/C2D/C2D_OPFK8.ts new file mode 100644 index 000000000..9c894bd38 --- /dev/null +++ b/src/@types/C2D/C2D_OPFK8.ts @@ -0,0 +1,94 @@ +/* The following are specific to OPF_k8 compute engine */ +export interface OPFK8ComputeStageInput { + index: number + id?: string + remote?: any + url?: string[] +} +export interface OPFK8ComputeStageAlgorithm { + id?: string + url?: string + remote?: any + rawcode?: string + container?: { + /** + * The command to execute, or script to run inside the Docker image. + * @type {string} + */ + entrypoint: string + + /** + * Name of the Docker image. + * @type {string} + */ + image: string + + /** + * Tag of the Docker image. 
+ * @type {string} + */ + tag: string + } +} + +export interface OPFK8ComputeOutput { + // this is a copy of ComputeOutput, but they could diverge in the future + publishAlgorithmLog?: boolean + publishOutput?: boolean + providerAddress?: string + providerUri?: string + metadataUri?: string + nodeUri?: string + owner?: string + secretStoreUri?: string + whitelist?: string[] +} +export interface OPFK8ComputeStage { + index: number + input: OPFK8ComputeStageInput[] + algorithm: OPFK8ComputeStageAlgorithm + compute?: {} + output: OPFK8ComputeOutput +} + +export interface OPFK8ComputeWorkflow { + stages: OPFK8ComputeStage[] +} +export interface OPFK8ComputeStart { + workflow: OPFK8ComputeWorkflow + owner: string + agreementId: string + providerSignature: string + providerAddress: string + environment: string + validUntil: number + nonce: number + chainId: number +} + +export interface OPFK8ComputeStop { + jobId: string + owner: string + agreementId?: string + providerSignature: string // message=owner+jobId + providerAddress: string + nonce: number +} + +export interface OPFK8ComputeGetStatus { + agreementId?: string + jobId?: string + owner?: string + providerSignature: string // message=owner+jobId(if any) + providerAddress: string + nonce: number +} + +export interface OPFK8ComputeGetResult { + jobId: string + owner: string + index: number + providerSignature: string // message=owner+jobId + providerAddress: string + nonce: number +} diff --git a/src/@types/OceanNode.ts b/src/@types/OceanNode.ts index f221a30d8..392479251 100644 --- a/src/@types/OceanNode.ts +++ b/src/@types/OceanNode.ts @@ -1,6 +1,6 @@ import { Stream } from 'stream' import { RPCS } from './blockchain' -import { C2DClusterInfo } from './C2D' +import { C2DClusterInfo } from './C2D/C2D' import { FeeStrategy } from './Fees' import { Schema } from '../components/database' @@ -61,15 +61,6 @@ export interface OceanNodeP2PConfig { autoDialInterval: number } -export interface OceanNodeDockerConfig { - socketPath?: string - protocol?: string - host?: string - port?: number - caPath?: string - certPath?: string - keyPath?: string -} export interface OceanNodeConfig { authorizedDecrypters: string[] allowedValidators: string[] @@ -86,7 +77,6 @@ export interface OceanNodeConfig { indexingNetworks?: RPCS c2dClusters: C2DClusterInfo[] c2dNodeUri: string - dockerConfig?: OceanNodeDockerConfig accountPurgatoryUrl: string assetPurgatoryUrl: string allowedAdmins?: string[] @@ -139,7 +129,7 @@ export interface OceanNodeStatus { codeHash?: string allowedAdmins?: string[] // detailed information - c2dClusters?: C2DClusterInfo[] + c2dClusters?: any[] supportedSchemas?: Schema[] } diff --git a/src/@types/commands.ts b/src/@types/commands.ts index 763e636a4..212accf0a 100644 --- a/src/@types/commands.ts +++ b/src/@types/commands.ts @@ -1,7 +1,7 @@ import { ValidateParams } from '../components/httpRoutes/validateCommands.js' import { DDO } from './DDO/DDO' import { P2PCommandResponse } from './OceanNode' -import type { ComputeAsset, ComputeAlgorithm, ComputeOutput } from './C2D' +import type { ComputeAsset, ComputeAlgorithm, ComputeOutput } from './C2D/C2D.js' import { ArweaveFileObject, FileObjectType, @@ -65,7 +65,9 @@ export interface ValidateDDOCommand extends Command { ddo: DDO } -export interface StatusCommand extends Command {} +export interface StatusCommand extends Command { + detailed?: boolean +} export interface DetailedStatusCommand extends StatusCommand {} export interface EchoCommand extends Command {} @@ -153,6 +155,7 @@ export 
interface ComputeInitializeCommand extends Command { algorithm: ComputeAlgorithm compute: ComputeDetails consumerAddress: string + signature?: string } export interface ComputeStartCommand extends Command { @@ -161,8 +164,15 @@ export interface ComputeStartCommand extends Command { nonce: string environment: string algorithm: ComputeAlgorithm - dataset: ComputeAsset - additionalDatasets?: ComputeAsset[] + datasets?: ComputeAsset[] + output?: ComputeOutput +} +export interface FreeComputeStartCommand extends Command { + consumerAddress: string + signature: string + nonce: string + algorithm: ComputeAlgorithm + datasets?: ComputeAsset[] output?: ComputeOutput } @@ -181,6 +191,12 @@ export interface ComputeGetResultCommand extends Command { jobId: string index: number } +export interface ComputeGetStreamableLogsCommand extends Command { + consumerAddress: string + signature: string + nonce: string + jobId: string +} export interface ComputeGetStatusCommand extends Command { consumerAddress?: string diff --git a/src/@types/docker-registry-lient.ts b/src/@types/docker-registry-lient.ts new file mode 100644 index 000000000..4d858e257 --- /dev/null +++ b/src/@types/docker-registry-lient.ts @@ -0,0 +1 @@ +declare module 'docker-registry-client' diff --git a/src/@types/index.ts b/src/@types/index.ts index 8a5d367bc..8ac35eeb1 100644 --- a/src/@types/index.ts +++ b/src/@types/index.ts @@ -1,3 +1,3 @@ export * from './OceanNode' -export * from './C2D' +export * from './C2D/C2D' export * from './Typesense' diff --git a/src/OceanNode.ts b/src/OceanNode.ts index ba40feac9..37da3c89a 100644 --- a/src/OceanNode.ts +++ b/src/OceanNode.ts @@ -60,7 +60,13 @@ export class OceanNode { if (this.c2dEngines) { await this.c2dEngines.stopAllEngines() } - if (_config && _config.c2dClusters) this.c2dEngines = new C2DEngines(_config) + if (_config && _config.c2dClusters) { + if (!this.db || !this.db.c2d) { + OCEAN_NODE_LOGGER.error('C2DDatabase is mandatory for compute engines!') + return + } + this.c2dEngines = new C2DEngines(_config, this.db.c2d) + } } public getP2PNode(): OceanP2P | undefined { diff --git a/src/components/c2d/compute_engine_base.ts b/src/components/c2d/compute_engine_base.ts index ef147eea5..68d1d3245 100644 --- a/src/components/c2d/compute_engine_base.ts +++ b/src/components/c2d/compute_engine_base.ts @@ -5,11 +5,12 @@ import type { ComputeAlgorithm, ComputeAsset, ComputeJob, - ComputeOutput -} from '../../@types/C2D.js' -import { C2DClusterType } from '../../@types/C2D.js' + ComputeOutput, + DBComputeJob +} from '../../@types/C2D/C2D.js' +import { C2DClusterType } from '../../@types/C2D/C2D.js' -export class C2DEngine { +export abstract class C2DEngine { private clusterConfig: C2DClusterInfo public constructor(cluster: C2DClusterInfo) { this.clusterConfig = cluster @@ -26,19 +27,49 @@ export class C2DEngine { } // functions which need to be implemented by all engine types - // eslint-disable-next-line require-await - public async getComputeEnvironments(chainId: number): Promise { - throw new Error(`Not implemented`) - } + public abstract getComputeEnvironments(chainId?: number): Promise - public async start(): Promise { - // overwritten by classes for start actions + // overwritten by classes for start actions + public start(): Promise { + throw new Error('Method not implemented.') } - public async stop(): Promise { - // overwritten by classes for cleanup + // overwritten by classes for cleanup + public stop(): Promise { + throw new Error('Method not implemented.') } + public abstract 
startComputeJob( + assets: ComputeAsset[], + algorithm: ComputeAlgorithm, + output: ComputeOutput, + environment: string, + owner?: string, + validUntil?: number, + chainId?: number, + agreementId?: string + ): Promise + + public abstract stopComputeJob( + jobId: string, + owner: string, + agreementId?: string + ): Promise + + public abstract getComputeJobStatus( + consumerAddress?: string, + agreementId?: string, + jobId?: string + ): Promise + + public abstract getComputeJobResult( + consumerAddress: string, + jobId: string, + index: number + ): Promise + + public abstract cleanupExpiredStorage(job: DBComputeJob): Promise + public async envExists( chainId: number, envIdWithHash?: string, @@ -77,49 +108,69 @@ export class C2DEngine { return null } - // eslint-disable-next-line require-await - public async startComputeJob( + public getStreamableLogs(jobId: string): Promise { + throw new Error(`Not implemented for this engine type`) + } + + protected async getJobEnvironment(job: DBComputeJob): Promise { + const environments: ComputeEnvironment[] = await ( + await this.getComputeEnvironments() + ).filter((env: ComputeEnvironment) => env.id === job.environment) + // found it + if (environments.length === 1) { + const environment = environments[0] + return environment + } + return null + } +} + +export class C2DEngineLocal extends C2DEngine { + public getComputeEnvironments(chainId?: number): Promise { + throw new Error('Method not implemented.') + } + + public startComputeJob( assets: ComputeAsset[], algorithm: ComputeAlgorithm, output: ComputeOutput, - owner: string, environment: string, - validUntil: number, - chainId: number, - agreementId: string + owner?: string, + validUntil?: number, + chainId?: number, + agreementId?: string ): Promise { - throw new Error(`Not implemented`) + throw new Error('Method not implemented.') } - // eslint-disable-next-line require-await - public async stopComputeJob( + public stopComputeJob( jobId: string, owner: string, agreementId?: string ): Promise { - throw new Error(`Not implemented`) + throw new Error('Method not implemented.') } - // eslint-disable-next-line require-await - public async getComputeJobStatus( + public getComputeJobStatus( consumerAddress?: string, agreementId?: string, jobId?: string ): Promise { - throw new Error(`Not implemented`) + throw new Error('Method not implemented.') } - // eslint-disable-next-line require-await - public async getComputeJobResult( + public getComputeJobResult( consumerAddress: string, jobId: string, index: number ): Promise { - throw new Error(`Not implemented`) + throw new Error('Method not implemented.') + } + + public cleanupExpiredStorage(job: DBComputeJob): Promise { + throw new Error('Method not implemented.') } -} -export class C2DEngineLocal extends C2DEngine { // eslint-disable-next-line no-useless-constructor public constructor(clusterConfig: C2DClusterInfo) { super(clusterConfig) diff --git a/src/components/c2d/compute_engine_docker.ts b/src/components/c2d/compute_engine_docker.ts new file mode 100644 index 000000000..69577ed55 --- /dev/null +++ b/src/components/c2d/compute_engine_docker.ts @@ -0,0 +1,1118 @@ +/* eslint-disable security/detect-non-literal-fs-filename */ +import { Readable } from 'stream' +import { C2DClusterType, C2DStatusNumber, C2DStatusText } from '../../@types/C2D/C2D.js' +import type { + C2DClusterInfo, + ComputeEnvironment, + ComputeAlgorithm, + ComputeAsset, + ComputeJob, + ComputeOutput, + DBComputeJob, + ComputeResult, + RunningPlatform +} from '../../@types/C2D/C2D.js' 
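+// NOTE (editorial sketch, not part of the original patch): the constructor below
+// accepts either of Dockerode's two connection styles, mirrored by
+// C2DClusterInfo.connection; values here are illustrative only:
+//   const viaSocket = new Dockerode({ socketPath: '/var/run/docker.sock' })
+//   const viaTcp = new Dockerode({ protocol: 'https', host: '127.0.0.1', port: 2376 })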
+import { getConfiguration } from '../../utils/config.js' +import { C2DEngine } from './compute_engine_base.js' +import { C2DDatabase } from '../database/C2DDatabase.js' +import { create256Hash } from '../../utils/crypt.js' +import { Storage } from '../storage/index.js' +import Dockerode from 'dockerode' +import type { ContainerCreateOptions, HostConfig, VolumeCreateOptions } from 'dockerode' +import * as tar from 'tar' +import { + createWriteStream, + existsSync, + mkdirSync, + rmSync, + writeFileSync, + statSync, + createReadStream +} from 'fs' +import { pipeline } from 'node:stream/promises' +import { CORE_LOGGER } from '../../utils/logging/common.js' +import { generateUniqueID } from '../database/sqliteCompute.js' +import { Blockchain } from '../../utils/blockchain.js' +import { AssetUtils } from '../../utils/asset.js' +import { FindDdoHandler } from '../core/handler/ddoHandler.js' +import { OceanNode } from '../../OceanNode.js' +import { Service } from '../../@types/DDO/Service.js' +import { decryptFilesObject, omitDBComputeFieldsFromComputeJob } from './index.js' +import * as drc from 'docker-registry-client' +import { ValidateParams } from '../httpRoutes/validateCommands.js' +import { convertGigabytesToBytes } from '../../utils/util.js' + +export class C2DEngineDocker extends C2DEngine { + private envs: ComputeEnvironment[] = [] + protected db: C2DDatabase + public docker: Dockerode + private cronTimer: any + private cronTime: number = 2000 + public constructor(clusterConfig: C2DClusterInfo, db: C2DDatabase) { + super(clusterConfig) + this.db = db + this.docker = null + if (clusterConfig.connection.socketPath) { + try { + this.docker = new Dockerode({ socketPath: clusterConfig.connection.socketPath }) + } catch (e) { + CORE_LOGGER.error('Could not create Docker container: ' + e.message) + } + } + if ( + clusterConfig.connection.protocol && + clusterConfig.connection.host && + clusterConfig.connection.port + ) { + try { + this.docker = new Dockerode({ + protocol: clusterConfig.connection.protocol, + host: clusterConfig.connection.host, + port: clusterConfig.connection.port + }) + } catch (e) { + CORE_LOGGER.error('Could not create Docker container: ' + e.message) + } + } + // TO DO C2D - create envs + try { + if (!existsSync(clusterConfig.tempFolder)) + mkdirSync(clusterConfig.tempFolder, { recursive: true }) + } catch (e) { + CORE_LOGGER.error( + 'Could not create Docker container temporary folders: ' + e.message + ) + } + + if (clusterConfig.connection?.environments) { + this.envs = clusterConfig.connection.environments + } + // only when we got the first request to start a compute job, + // no need to start doing this right away + // this.setNewTimer() + } + + // eslint-disable-next-line require-await + public override async getComputeEnvironments( + chainId?: number + ): Promise { + /** + * Returns all cluster's compute environments for a specific chainId. 
Env's id already contains the cluster hash */ + if (!this.docker) return [] + + if (chainId) { + const config = await getConfiguration() + const supportedNetwork = config.supportedNetworks[chainId] + if (supportedNetwork) { + const blockchain = new Blockchain( + supportedNetwork.rpc, + supportedNetwork.network, + chainId, + supportedNetwork.fallbackRPCs + ) + + // write the consumer address (compute env address) + const consumerAddress = await blockchain.getWalletAddress() + const filteredEnvs = [] + for (const computeEnv of this.envs) { + if (computeEnv.chainId === chainId) { + computeEnv.consumerAddress = consumerAddress + filteredEnvs.push(computeEnv) + } + } + return filteredEnvs + } + // no compute envs or network is not supported + CORE_LOGGER.error(`There are no compute environments for network ${chainId}`) + return [] + } + return this.envs + } + + /** + * Checks the docker image by looking at the manifest + * @param image image name, tag or digest + * @returns ValidateParams + */ + public static async checkDockerImage( + image: string, + platform?: RunningPlatform + ): Promise<ValidateParams> { + try { + const info = drc.default.parseRepoAndRef(image) + /** + * info: { + index: { name: 'docker.io', official: true }, + official: true, + remoteName: 'library/node', + localName: 'node', + canonicalName: 'docker.io/node', + digest: 'sha256:1155995dda741e93afe4b1c6ced2d01734a6ec69865cc0997daf1f4db7259a36' + } + */ + const client = drc.createClientV2({ name: info.localName }) + const tagOrDigest = info.tag || info.digest + + // try to get the manifest from the registry + return await new Promise((resolve, reject) => { + client.getManifest( + { ref: tagOrDigest, maxSchemaVersion: 2 }, + function (err: any, manifest: any) { + client.close() + if (manifest) { + return resolve({ + valid: checkManifestPlatform(manifest.platform, platform) + }) + } + + if (err) { + CORE_LOGGER.error( + `Unable to get Manifest for image ${image}: ${err.message}` + ) + reject(err) + } + } + ) + }) + } catch (err) { + // show all aggregated errors, if present + const aggregated = err.errors && err.errors.length > 0 + aggregated ? CORE_LOGGER.error(JSON.stringify(err.errors)) : CORE_LOGGER.error(err) + return { + valid: false, + status: 404, + reason: aggregated ? JSON.stringify(err.errors) : err.message + } + } + } + + // eslint-disable-next-line require-await + public override async startComputeJob( + assets: ComputeAsset[], + algorithm: ComputeAlgorithm, + output: ComputeOutput, + environment: string, + owner?: string, + validUntil?: number, + chainId?: number, + agreementId?: string + ): Promise<ComputeJob[]> { + if (!this.docker) return [] + + const jobId = generateUniqueID() + + // C2D - Check image, check architecture, etc. + const image = getAlgorithmImage(algorithm) + // ex: node@sha256:1155995dda741e93afe4b1c6ced2d01734a6ec69865cc0997daf1f4db7259a36 + if (!image) { + // send a 500 with the error message + throw new Error( + `Unable to extract docker image ${image} from algorithm: ${JSON.stringify( + algorithm + )}` + ) + } + const envIdWithHash = environment && environment.indexOf('-') > -1 + const env = await this.getComputeEnvironment( + chainId, + envIdWithHash ? environment : null, + environment + ) + + const validation = await C2DEngineDocker.checkDockerImage( + image, + env.platform && env.platform.length > 0 ?
env.platform[0] : null + ) + if (!validation.valid) + throw new Error(`Unable to validate docker image ${image}: ${validation.reason}`) + + const job: DBComputeJob = { + clusterHash: this.getC2DConfig().hash, + containerImage: image, + owner, + jobId, + dateCreated: String(Date.now() / 1000), + dateFinished: null, + status: C2DStatusNumber.JobStarted, + statusText: C2DStatusText.JobStarted, + results: [], + algorithm, + assets, + agreementId, + expireTimestamp: Date.now() / 1000 + validUntil, + environment, + configlogURL: null, + publishlogURL: null, + algologURL: null, + outputsURL: null, + stopRequested: false, + isRunning: true, + isStarted: false + } + await this.makeJobFolders(job) + // make sure we actually were able to insert on DB + const addedId = await this.db.newJob(job) + if (!addedId) { + return [] + } + + // only now set the timer + if (!this.cronTimer) { + this.setNewTimer() + } + const cjob: ComputeJob = omitDBComputeFieldsFromComputeJob(job) + // we add cluster hash to user output + cjob.jobId = this.getC2DConfig().hash + '-' + cjob.jobId + // cjob.jobId = jobId + return [cjob] + } + + // eslint-disable-next-line require-await + public override async stopComputeJob( + jobId: string, + owner: string, + agreementId?: string + ): Promise { + return null + } + + // eslint-disable-next-line require-await + protected async getResults(jobId: string): Promise { + const res: ComputeResult[] = [] + let index = 0 + try { + const logStat = statSync( + this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/algorithm.log' + ) + if (logStat) { + res.push({ + filename: 'algorithm.log', + filesize: logStat.size, + type: 'algorithmLog', + index + }) + index = index + 1 + } + } catch (e) {} + try { + const outputStat = statSync( + this.getC2DConfig().tempFolder + '/' + jobId + '/data/outputs/outputs.tar' + ) + if (outputStat) { + res.push({ + filename: 'outputs.tar', + filesize: outputStat.size, + type: 'output', + index + }) + index = index + 1 + } + } catch (e) {} + return res + } + + // eslint-disable-next-line require-await + public override async getComputeJobStatus( + consumerAddress?: string, + agreementId?: string, + jobId?: string + ): Promise { + const jobs = await this.db.getJob(jobId, agreementId, consumerAddress) + if (jobs.length === 0) { + return [] + } + const statusResults = [] + for (const job of jobs) { + const res: ComputeJob = omitDBComputeFieldsFromComputeJob(job) + // add results for algoLogs + res.results = await this.getResults(job.jobId) + statusResults.push(res) + } + + return statusResults + } + + // eslint-disable-next-line require-await + public override async getComputeJobResult( + consumerAddress: string, + jobId: string, + index: number + ): Promise { + const jobs = await this.db.getJob(jobId, null, consumerAddress) + if (jobs.length === 0) { + return null + } + const results = await this.getResults(jobId) + for (const i of results) { + if (i.index === index) { + if (i.type === 'algorithmLog') { + return createReadStream( + this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/algorithm.log' + ) + } + if (i.type === 'output') { + return createReadStream( + this.getC2DConfig().tempFolder + '/' + jobId + '/data/outputs/outputs.tar' + ) + } + } + } + return null + } + + // eslint-disable-next-line require-await + public override async getStreamableLogs(jobId: string): Promise { + const jobRes: DBComputeJob[] = await this.db.getJob(jobId) + if (jobRes.length === 0) return null + if (!jobRes[0].isRunning) return null + try { + const job = jobRes[0] + 
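+      // NOTE (editorial): container.logs() with follow: true (below) resolves to a
+      // live NodeJS.ReadableStream rather than a Buffer, so a caller can pipe it
+      // straight through, e.g. (sketch with hypothetical names):
+      //   const logs = await engine.getStreamableLogs(jobId)
+      //   if (logs) logs.pipe(httpResponse)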
const container = await this.docker.getContainer(job.jobId + '-algoritm') + const details = await container.inspect() + if (details.State.Running === false) return null + return await container.logs({ + stdout: true, + stderr: true, + follow: true + }) + } catch (e) { + return null + } + } + + private async setNewTimer() { + // don't set the cron if we don't have compute environments + if ((await this.getComputeEnvironments()).length > 0) + this.cronTimer = setInterval(this.InternalLoop.bind(this), this.cronTime) + } + + private async InternalLoop() { + // this is the internal loop of docker engine + // gets list of all running jobs and process them one by one + clearInterval(this.cronTimer) + this.cronTimer = null + // get all running jobs + const jobs = await this.db.getRunningJobs(this.getC2DConfig().hash) + + if (jobs.length === 0) { + CORE_LOGGER.info('No C2D jobs found for engine ' + this.getC2DConfig().hash) + return + } else { + CORE_LOGGER.info(`Got ${jobs.length} jobs for engine ${this.getC2DConfig().hash}`) + CORE_LOGGER.debug(JSON.stringify(jobs)) + } + const promises: any = [] + for (const job of jobs) { + promises.push(this.processJob(job)) + } + // wait for all promises, there is no return + await Promise.all(promises) + // set the cron again + this.setNewTimer() + } + + // eslint-disable-next-line require-await + private async processJob(job: DBComputeJob) { + console.log(`Process job started: [STATUS: ${job.status}: ${job.statusText}]`) + console.log(job) + // has to : + // - monitor running containers and stop them if over limits + // - monitor disc space and clean up + /* steps: + - instruct docker to pull image + - create volume + - after image is ready, create the container + - download assets & algo into temp folder + - download DDOS + - tar and upload assets & algo to container + - start the container + - check if container is exceeding validUntil + - if yes, stop it + - download /data/outputs and store it locally (or upload it somewhere) + - delete the container + - delete the volume + */ + if (job.status === C2DStatusNumber.JobStarted) { + // pull docker image + try { + const pullStream = await this.docker.pull(job.containerImage) + await new Promise((resolve, reject) => { + let wroteStatusBanner = false + this.docker.modem.followProgress( + pullStream, + (err, res) => { + // onFinished + if (err) return reject(err) + CORE_LOGGER.info('############# Pull docker image complete ##############') + resolve(res) + }, + (progress) => { + // onProgress + if (!wroteStatusBanner) { + wroteStatusBanner = true + CORE_LOGGER.info('############# Pull docker image status: ##############') + } + // only write the status banner once, its cleaner + CORE_LOGGER.info(progress.status) + } + ) + }) + } catch (err) { + CORE_LOGGER.error( + `Unable to pull docker image: ${job.containerImage}: ${err.message}` + ) + await this.db.deleteJob(job.jobId) + return + } + + job.status = C2DStatusNumber.PullImage + job.statusText = C2DStatusText.PullImage + await this.db.updateJob(job) + return // now we wait until image is ready + } + if (job.status === C2DStatusNumber.PullImage) { + try { + const imageInfo = await this.docker.getImage(job.containerImage) + console.log('imageInfo', imageInfo) + const details = await imageInfo.inspect() + console.log('details:', details) + job.status = C2DStatusNumber.ConfiguringVolumes + job.statusText = C2DStatusText.ConfiguringVolumes + await this.db.updateJob(job) + // now we can move forward + } catch (e) { + // not ready yet + console.log('ERROR: Unable to 
inspect', e.message) + } + return + } + if (job.status === C2DStatusNumber.ConfiguringVolumes) { + // create the volume & create container + // TO DO C2D: Choose driver & size + const volume: VolumeCreateOptions = { + Name: job.jobId + '-volume' + } + try { + await this.docker.createVolume(volume) + } catch (e) { + job.status = C2DStatusNumber.VolumeCreationFailed + job.statusText = C2DStatusText.VolumeCreationFailed + job.isRunning = false + await this.db.updateJob(job) + await this.cleanupJob(job) + } + // get env info + const environment = await this.getJobEnvironment(job) + // create the container + const mountVols: any = { '/data': {} } + const hostConfig: HostConfig = { + Mounts: [ + { + Type: 'volume', + Source: volume.Name, + Target: '/data', + ReadOnly: false + } + ] + } + if (environment != null) { + // limit container CPU & Memory usage according to env specs + hostConfig.CpuCount = environment.cpuNumber || 1 + // if more than 1 CPU + if (hostConfig.CpuCount > 1) { + hostConfig.CpusetCpus = `0-${hostConfig.CpuCount - 1}` + } + hostConfig.Memory = 0 || convertGigabytesToBytes(environment.ramGB) + // set swap to same memory value means no swap (otherwise it use like 2X mem) + hostConfig.MemorySwap = hostConfig.Memory + } + // console.log('host config: ', hostConfig) + const containerInfo: ContainerCreateOptions = { + name: job.jobId + '-algoritm', + Image: job.containerImage, + AttachStdin: false, + AttachStdout: true, + AttachStderr: true, + Tty: true, + OpenStdin: false, + StdinOnce: false, + Volumes: mountVols, + HostConfig: hostConfig + } + + if (job.algorithm.meta.container.entrypoint) { + const newEntrypoint = job.algorithm.meta.container.entrypoint.replace( + '$ALGO', + 'data/transformations/algorithm' + ) + containerInfo.Entrypoint = newEntrypoint.split(' ') + } + + try { + const container = await this.docker.createContainer(containerInfo) + console.log('container: ', container) + job.status = C2DStatusNumber.Provisioning + job.statusText = C2DStatusText.Provisioning + await this.db.updateJob(job) + } catch (e) { + job.status = C2DStatusNumber.ContainerCreationFailed + job.statusText = C2DStatusText.ContainerCreationFailed + job.isRunning = false + await this.db.updateJob(job) + await this.cleanupJob(job) + } + return + } + if (job.status === C2DStatusNumber.Provisioning) { + // download algo & assets + const ret = await this.uploadData(job) + console.log('Upload data') + console.log(ret) + job.status = ret.status + job.statusText = ret.statusText + if (job.status !== C2DStatusNumber.RunningAlgorithm) { + // failed, let's close it + job.isRunning = false + await this.db.updateJob(job) + await this.cleanupJob(job) + } else { + await this.db.updateJob(job) + } + } + if (job.status === C2DStatusNumber.RunningAlgorithm) { + const container = await this.docker.getContainer(job.jobId + '-algoritm') + const details = await container.inspect() + console.log('Container inspect') + console.log(details) + if (job.isStarted === false) { + // make sure is not started + if (details.State.Running === false) { + try { + await container.start() + job.isStarted = true + await this.db.updateJob(job) + return + } catch (e) { + // container failed to start + console.error('could not start container: ' + e.message) + console.log(e) + job.status = C2DStatusNumber.AlgorithmFailed + job.statusText = C2DStatusText.AlgorithmFailed + job.algologURL = String(e) + job.isRunning = false + await this.db.updateJob(job) + await this.cleanupJob(job) + return + } + } + } else { + // is running, we need to 
stop it.. + console.log('running, need to stop it?') + const timeNow = Date.now() / 1000 + console.log('timeNow: ' + timeNow + ' , Expiry: ' + job.expireTimestamp) + if (timeNow > job.expireTimestamp || job.stopRequested) { + // we need to stop the container + // make sure is running + console.log('We need to stop') + console.log(details.State.Running) + if (details.State.Running === true) { + try { + await container.stop() + } catch (e) { + // we should never reach this, unless the container is already stopped or deleted by someone else + console.log(e) + } + } + console.log('Stopped') + job.isStarted = false + job.status = C2DStatusNumber.PublishingResults + job.statusText = C2DStatusText.PublishingResults + await this.db.updateJob(job) + return + } else { + if (details.State.Running === false) { + job.isStarted = false + job.status = C2DStatusNumber.PublishingResults + job.statusText = C2DStatusText.PublishingResults + await this.db.updateJob(job) + return + } + } + } + } + if (job.status === C2DStatusNumber.PublishingResults) { + // get output + job.status = C2DStatusNumber.JobFinished + job.statusText = C2DStatusText.JobFinished + const container = await this.docker.getContainer(job.jobId + '-algoritm') + const outputsArchivePath = + this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/outputs/outputs.tar' + try { + await pipeline( + await container.getArchive({ path: '/data/outputs' }), + createWriteStream(outputsArchivePath) + ) + } catch (e) { + console.log(e) + job.status = C2DStatusNumber.ResultsFetchFailed + job.statusText = C2DStatusText.ResultsFetchFailed + } + job.isRunning = false + await this.db.updateJob(job) + await this.cleanupJob(job) + } + } + + // eslint-disable-next-line require-await + private async cleanupJob(job: DBComputeJob) { + // cleaning up + // - get algo logs + // - delete volume + // - delete container + + const container = await this.docker.getContainer(job.jobId + '-algoritm') + try { + writeFileSync( + this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/logs/algorithm.log', + await container.logs({ + stdout: true, + stderr: true, + follow: false + }) + ) + } catch (e) { + console.log(e) + } + + await container.remove() + const volume = await this.docker.getVolume(job.jobId + '-volume') + await volume.remove() + // remove folders + rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/inputs', { + recursive: true, + force: true + }) + rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/transformations', { + recursive: true, + force: true + }) + } + + private deleteOutputFolder(job: DBComputeJob) { + rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/outputs/', { + recursive: true, + force: true + }) + } + + private async uploadData( + job: DBComputeJob + ): Promise<{ status: C2DStatusNumber; statusText: C2DStatusText }> { + const config = await getConfiguration() + const ret = { + status: C2DStatusNumber.RunningAlgorithm, + statusText: C2DStatusText.RunningAlgorithm + } + // for testing purposes + // if (!job.algorithm.fileObject) { + // console.log('no file object') + // const file: UrlFileObject = { + // type: 'url', + // url: 'https://raw.githubusercontent.com/oceanprotocol/test-algorithm/master/javascript/algo.js', + // method: 'get' + // } + // job.algorithm.fileObject = file + // } + // download algo + // TODO: we currently DO NOT have a way to set this field unencrypted (once we publish the asset its encrypted) + // So we cannot test this from the CLI for instance... 
Only Option is to actually send it encrypted + // OR extract the files object from the passed DDO, decrypt it and use it + + console.log(job.algorithm.fileObject) + const fullAlgoPath = + this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/transformations/algorithm' + try { + let storage = null + // do we have a files object? + if (job.algorithm.fileObject) { + // is it unencrypted? + if (job.algorithm.fileObject.type) { + // we can get the storage directly + storage = Storage.getStorageClass(job.algorithm.fileObject, config) + } else { + // ok, maybe we have this encrypted instead + CORE_LOGGER.info('algorithm file object seems to be encrypted, checking it...') + // 1. Decrypt the files object + const decryptedFileObject = await decryptFilesObject(job.algorithm.fileObject) + console.log('decryptedFileObject: ', decryptedFileObject) + // 2. Get default storage settings + storage = Storage.getStorageClass(decryptedFileObject, config) + } + } else { + // no files object, try to get information from documentId and serviceId + CORE_LOGGER.info( + 'algorithm file object seems to be missing, checking "serviceId" and "documentId"...' + ) + const { serviceId, documentId } = job.algorithm + // we can get it from this info + if (serviceId && documentId) { + const algoDdo = await new FindDdoHandler( + OceanNode.getInstance() + ).findAndFormatDdo(documentId) + console.log('algo ddo:', algoDdo) + // 1. Get the service + const service: Service = AssetUtils.getServiceById(algoDdo, serviceId) + + // 2. Decrypt the files object + const decryptedFileObject = await decryptFilesObject(service.files) + console.log('decryptedFileObject: ', decryptedFileObject) + // 4. Get default storage settings + storage = Storage.getStorageClass(decryptedFileObject, config) + } + } + + if (storage) { + console.log('fullAlgoPath', fullAlgoPath) + await pipeline( + (await storage.getReadableStream()).stream, + createWriteStream(fullAlgoPath) + ) + } else { + CORE_LOGGER.info( + 'Could not extract any files object from the compute algorithm, skipping...' + ) + } + } catch (e) { + CORE_LOGGER.error( + 'Unable to write algorithm to path: ' + fullAlgoPath + ': ' + e.message + ) + return { + status: C2DStatusNumber.AlgorithmProvisioningFailed, + statusText: C2DStatusText.AlgorithmProvisioningFailed + } + } + + // now for the assets + for (const i in job.assets) { + const asset = job.assets[i] + let storage = null + let fileInfo = null + console.log('checking now asset: ', asset) + // without this check it would break if no fileObject is present + if (asset.fileObject) { + if (asset.fileObject.type) { + storage = Storage.getStorageClass(asset.fileObject, config) + } else { + CORE_LOGGER.info('asset file object seems to be encrypted, checking it...') + // get the encrypted bytes + const filesObject: any = await decryptFilesObject(asset.fileObject) + storage = Storage.getStorageClass(filesObject, config) + } + + // we need the file info for the name (but could be something else here) + fileInfo = await storage.getFileInfo({ + type: storage.getStorageType(asset.fileObject) + }) + } else { + // we need to go the hard way + const { serviceId, documentId } = asset + if (serviceId && documentId) { + // need to get the file + const ddo = await new FindDdoHandler(OceanNode.getInstance()).findAndFormatDdo( + documentId + ) + + // 2. Get the service + const service: Service = AssetUtils.getServiceById(ddo, serviceId) + // 3. 
Decrypt the url + const decryptedFileObject = await decryptFilesObject(service.files) + console.log('decryptedFileObject: ', decryptedFileObject) + storage = Storage.getStorageClass(decryptedFileObject, config) + + fileInfo = await storage.getFileInfo({ + type: storage.getStorageType(decryptedFileObject) + }) + } + } + + if (storage && fileInfo) { + const fullPath = + this.getC2DConfig().tempFolder + + '/' + + job.jobId + + '/data/inputs/' + + fileInfo[0].name + + console.log('asset full path: ' + fullPath) + try { + await pipeline( + (await storage.getReadableStream()).stream, + createWriteStream(fullPath) + ) + } catch (e) { + CORE_LOGGER.error( + 'Unable to write input data to path: ' + fullPath + ': ' + e.message + ) + return { + status: C2DStatusNumber.DataProvisioningFailed, + statusText: C2DStatusText.DataProvisioningFailed + } + } + } else { + CORE_LOGGER.info( + 'Could not extract any files object from the compute asset, skipping...' + ) + } + } + CORE_LOGGER.info('All good with data provisioning, will start uploading it...') + // now, we have to create a tar arhive + const folderToTar = this.getC2DConfig().tempFolder + '/' + job.jobId + '/data' + const destination = + this.getC2DConfig().tempFolder + '/' + job.jobId + '/tarData/upload.tar.gz' + tar.create( + { + gzip: true, + file: destination, + sync: true, + C: folderToTar + }, + ['./'] + ) + // now, upload it to the container + const container = await this.docker.getContainer(job.jobId + '-algoritm') + + console.log('Start uploading') + try { + // await container2.putArchive(destination, { + const stream = await container.putArchive(destination, { + path: '/data' + }) + console.log('PutArchive') + console.log(stream) + + console.log('Done uploading') + } catch (e) { + console.log('Data upload failed') + console.log(e) + return { + status: C2DStatusNumber.DataUploadFailed, + statusText: C2DStatusText.DataUploadFailed + } + } + rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/inputs', { + recursive: true, + force: true + }) + rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/transformations', { + recursive: true, + force: true + }) + rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/tarData', { + recursive: true, + force: true + }) + return ret + } + + // eslint-disable-next-line require-await + private async makeJobFolders(job: DBComputeJob) { + try { + const baseFolder = this.getC2DConfig().tempFolder + '/' + job.jobId + console.log('BASE FOLDER: ' + baseFolder) + if (!existsSync(baseFolder)) mkdirSync(baseFolder) + if (!existsSync(baseFolder + '/data')) mkdirSync(baseFolder + '/data') + if (!existsSync(baseFolder + '/data/inputs')) mkdirSync(baseFolder + '/data/inputs') + if (!existsSync(baseFolder + '/data/transformations')) + mkdirSync(baseFolder + '/data/transformations') + // ddo directory + if (!existsSync(baseFolder + '/data/ddos')) { + mkdirSync(baseFolder + '/data/ddos') + } + if (!existsSync(baseFolder + '/data/outputs')) + mkdirSync(baseFolder + '/data/outputs') + if (!existsSync(baseFolder + '/data/logs')) mkdirSync(baseFolder + '/data/logs') + if (!existsSync(baseFolder + '/tarData')) mkdirSync(baseFolder + '/tarData') // used to upload and download data + } catch (e) {} + } + + // clean up temporary files + public override async cleanupExpiredStorage( + job: DBComputeJob, + isCleanAfterDownload: boolean = false + ): Promise { + if (!job) return false + CORE_LOGGER.info('Cleaning up C2D storage for Job: ' + job.jobId) + try { + // delete the storage + // for free env, the 
container is deleted as soon as we download the results + // so we avoid trying to do it again + if (!isCleanAfterDownload) { + await this.cleanupJob(job) + } + + // delete output folders + await this.deleteOutputFolder(job) + // delete the job + await this.db.deleteJob(job.jobId) + return true + } catch (e) { + CORE_LOGGER.error('Error cleaning up C2D storage and Job: ' + e.message) + } + return false + } +} + +// this uses the docker engine, but exposes only one env, the free one +export class C2DEngineDockerFree extends C2DEngineDocker { + public constructor(clusterConfig: C2DClusterInfo, db: C2DDatabase) { + // we drop the paid envs, because the free engine has its own + const hash = create256Hash('free' + clusterConfig.hash) + const overwrite = { + type: C2DClusterType.DOCKER, + hash, + connection: { + socketPath: clusterConfig.connection.socketPath, + protocol: clusterConfig.connection.protocol, + host: clusterConfig.connection.host, + port: clusterConfig.connection.port, + caPath: clusterConfig.connection.caPath, + certPath: clusterConfig.connection.certPath, + keyPath: clusterConfig.connection.keyPath, + freeComputeOptions: clusterConfig.connection.freeComputeOptions + }, + tempFolder: './c2d_storage/' + hash + } + super(overwrite, db) + } + + // eslint-disable-next-line require-await + public override async getComputeEnvironments( + chainId?: number + ): Promise<ComputeEnvironment[]> { + /** + * Returns all cluster's compute environments for a specific chainId. Env's id already contains the cluster hash + */ + // TO DO C2D - fill consts below + if (!this.docker) return [] + // const cpuType = '' + // const currentJobs = 0 + // const consumerAddress = '' + if (chainId) { + const config = await getConfiguration() + const supportedNetwork = config.supportedNetworks[chainId] + if (supportedNetwork) { + const blockchain = new Blockchain( + supportedNetwork.rpc, + supportedNetwork.network, + chainId, + supportedNetwork.fallbackRPCs + ) + + // write the consumer address (compute env address) + const consumerAddress = await blockchain.getWalletAddress() + const computeEnv: ComputeEnvironment = + this.getC2DConfig().connection?.freeComputeOptions + if (computeEnv.chainId === chainId) { + computeEnv.consumerAddress = consumerAddress + const envs: ComputeEnvironment[] = [computeEnv] + return envs + } + } + // no compute envs or network is not supported + CORE_LOGGER.error(`There are no free compute environments for network ${chainId}`) + return [] + } + // get them all + const envs: ComputeEnvironment[] = [ + this.getC2DConfig().connection?.freeComputeOptions + ] + return envs + } + + public override async startComputeJob( + assets: ComputeAsset[], + algorithm: ComputeAlgorithm, + output: ComputeOutput, + environment: string, + owner?: string, + validUntil?: number, + chainId?: number, + agreementId?: string + ): Promise<ComputeJob[]> { + // since it's a free job, we need to mangle some params + agreementId = create256Hash( + JSON.stringify({ + owner, + assets, + algorithm, + time: process.hrtime.bigint().toString() + }) + ) + chainId = 0 + const envs = await this.getComputeEnvironments() + if (envs.length < 1) { + // no free env configured
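+      // NOTE (editorial): we land here when no free environment is available;
+      // the free engine exposes exactly one env, taken from
+      // connection.freeComputeOptions (i.e. the DOCKER_FREE_COMPUTE setting).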
+ throw new Error('No free env found') + } + validUntil = envs[0].maxJobDuration + return await super.startComputeJob( + assets, + algorithm, + output, + environment, + owner, + validUntil, + chainId, + agreementId + ) + } + + // eslint-disable-next-line require-await + public override async getComputeJobResult( + consumerAddress: string, + jobId: string, + index: number + ): Promise { + const result = await super.getComputeJobResult(consumerAddress, jobId, index) + if (result !== null) { + setTimeout(async () => { + const jobs: DBComputeJob[] = await this.db.getJob(jobId) + CORE_LOGGER.info( + 'Cleaning storage for free container, after retrieving results...' + ) + if (jobs.length === 1) { + this.cleanupExpiredStorage(jobs[0], true) // clean the storage, do not wait for it to expire + } + }, 5000) + } + return result + } +} + +export function getAlgorithmImage(algorithm: ComputeAlgorithm): string { + if (!algorithm.meta || !algorithm.meta.container) { + return null + } + let { image } = algorithm.meta.container + if (algorithm.meta.container.checksum) + image = image + '@' + algorithm.meta.container.checksum + else if (algorithm.meta.container.tag) + image = image + ':' + algorithm.meta.container.tag + else image = image + ':latest' + console.log('Using image: ' + image) + return image +} + +export function checkManifestPlatform( + manifestPlatform: any, + envPlatform?: RunningPlatform +): boolean { + if (!manifestPlatform || !envPlatform) return true // skips if not present + if ( + envPlatform.architecture !== manifestPlatform.architecture || + envPlatform.os !== manifestPlatform.os + ) + return false + return true +} diff --git a/src/components/c2d/compute_engine_opf_k8.ts b/src/components/c2d/compute_engine_opf_k8.ts index 9956cdcf2..df970177b 100644 --- a/src/components/c2d/compute_engine_opf_k8.ts +++ b/src/components/c2d/compute_engine_opf_k8.ts @@ -6,6 +6,9 @@ import type { ComputeAsset, ComputeJob, ComputeOutput, + DBComputeJob +} from '../../@types/C2D/C2D.js' +import type { OPFK8ComputeStage, OPFK8ComputeStageAlgorithm, OPFK8ComputeStageInput, @@ -14,7 +17,7 @@ import type { OPFK8ComputeStop, OPFK8ComputeGetStatus, OPFK8ComputeGetResult -} from '../../@types/C2D.js' +} from '../../@types/C2D/C2D_OPFK8.js' import { sign } from '../core/utils/nonceHandler.js' import axios from 'axios' import { getConfiguration } from '../../utils/config.js' @@ -22,7 +25,7 @@ import { ZeroAddress } from 'ethers' import { getProviderFeeToken } from '../../components/core/utils/feesHandler.js' import { URLUtils } from '../../utils/url.js' import { C2DEngine } from './compute_engine_base.js' - +import { Storage } from '../storage/index.js' export class C2DEngineOPFK8 extends C2DEngine { // eslint-disable-next-line no-useless-constructor public constructor(clusterConfig: C2DClusterInfo) { @@ -30,7 +33,7 @@ export class C2DEngineOPFK8 extends C2DEngine { } public override async getComputeEnvironments( - chainId: number + chainId?: number ): Promise { /** * Returns all cluster's compute environments for a specific chainId. 
Env's id already contains the cluster hash @@ -38,13 +41,16 @@ export class C2DEngineOPFK8 extends C2DEngine { const envs: ComputeEnvironment[] = [] const clusterHash = this.getC2DConfig().hash const baseUrl = URLUtils.sanitizeURLPath(this.getC2DConfig().connection) - const url = `${baseUrl}api/v1/operator/environments?chain_id=${chainId}` + let url = `${baseUrl}api/v1/operator/environments` + if (chainId) url += `?chain_id=${chainId}` try { const { data } = await axios.get(url) if (!data) return envs // we need to add hash to each env id for (const [index, val] of data.entries()) { data[index].id = `${clusterHash}-${val.id}` + // k8 envs are not free envs + data[index].free = false if (!data[index].feeToken || data[index].feeToken?.toLowerCase() === ZeroAddress) data[index].feeToken = await getProviderFeeToken(chainId) } @@ -57,24 +63,39 @@ export class C2DEngineOPFK8 extends C2DEngine { assets: ComputeAsset[], algorithm: ComputeAlgorithm, output: ComputeOutput, - owner: string, environment: string, - validUntil: number, - chainId: number, - agreementId: string + owner?: string, + validUntil?: number, + chainId?: number, + agreementId?: string ): Promise { + // owner, validUntil,chainId, agreementId are not optional for OPF K8 + if (!owner) throw new Error(`Cannot start a c2d job without owner`) + if (!validUntil) throw new Error(`Cannot start a c2d job without validUntil`) + if (!chainId) throw new Error(`Cannot start a c2d job without chainId`) + if (!agreementId) throw new Error(`Cannot start a c2d job without agreementId`) // let's build the stage first // start with stage.input const config = await getConfiguration() const stagesInput: OPFK8ComputeStageInput[] = [] let index = 0 for (const asset of assets) { - if (asset.url) - stagesInput.push({ - index, - url: [asset.url] - }) - else + // TODO: we do not have a way (from CLI/SDK) to set this fileObject unencrypted + // Previously we had "url" property but that was never used anywhere for the same reason (we used "remote") + // we don't have the "url" anymore once we publish + if (asset.fileObject) { + try { + // since opf k8 supports only urls, we need to extract them + const storage = Storage.getStorageClass(asset.fileObject, config) + stagesInput.push({ + index, + url: [storage.getDownloadUrl()] + }) + } catch (e) { + const message = `Exception on startCompute. Cannot get URL of asset` + throw new Error(message) + } + } else stagesInput.push({ index, id: asset.documentId, @@ -96,8 +117,17 @@ export class C2DEngineOPFK8 extends C2DEngine { } // continue with algorithm const stageAlgorithm: OPFK8ComputeStageAlgorithm = {} - if (algorithm.url) { - stageAlgorithm.url = algorithm.url + + // TODO: we do not have a way (from CLI/SDK) to set this fileObject unencrypted + if (algorithm.fileObject) { + try { + // since opf k8 supports only urls, we need to extract them + const storage = Storage.getStorageClass(algorithm.fileObject, config) + stageAlgorithm.url = storage.getDownloadUrl() + } catch (e) { + const message = `Exception on startCompute. 
Cannot get URL of asset` + throw new Error(message) + } } else { stageAlgorithm.remote = { txId: algorithm.transferTxId, @@ -281,4 +311,9 @@ export class C2DEngineOPFK8 extends C2DEngine { } throw new Error(`getComputeJobStatus Failure`) } + + // eslint-disable-next-line require-await + public override async cleanupExpiredStorage(job: DBComputeJob): Promise { + throw new Error(`Not implemented`) + } } diff --git a/src/components/c2d/compute_engines.ts b/src/components/c2d/compute_engines.ts index 9991ac9e4..a119adaa3 100644 --- a/src/components/c2d/compute_engines.ts +++ b/src/components/c2d/compute_engines.ts @@ -1,18 +1,29 @@ -import { C2DClusterType, ComputeEnvironment } from '../../@types/C2D.js' +import { C2DClusterType, ComputeEnvironment } from '../../@types/C2D/C2D.js' import { C2DEngine } from './compute_engine_base.js' import { C2DEngineOPFK8 } from './compute_engine_opf_k8.js' +import { C2DEngineDocker, C2DEngineDockerFree } from './compute_engine_docker.js' import { OceanNodeConfig } from '../../@types/OceanNode.js' +import { C2DDatabase } from '../database/C2DDatabase.js' export class C2DEngines { public engines: C2DEngine[] - public constructor(config: OceanNodeConfig) { + public constructor(config: OceanNodeConfig, db: C2DDatabase) { // let's see what engines do we have and initialize them one by one + // for docker, we need to add the "free" + let haveFree = false if (config && config.c2dClusters) { this.engines = [] for (const cluster of config.c2dClusters) { if (cluster.type === C2DClusterType.OPF_K8) { this.engines.push(new C2DEngineOPFK8(cluster)) } + if (cluster.type === C2DClusterType.DOCKER) { + this.engines.push(new C2DEngineDocker(cluster, db)) + if (!haveFree) { + this.engines.push(new C2DEngineDockerFree(cluster, db)) + haveFree = true + } + } } } } @@ -64,8 +75,25 @@ export class C2DEngines { throw new Error(`C2D Engine not found by hash: ${clusterHash}`) } + async getC2DByEnvId(envId: string): Promise { + /** + * Searches all envs and returns engine class + * + * @param envId - Environment Id + * + */ + const { engines } = this + for (const i of engines) { + const environments = await i.getComputeEnvironments() + for (const env of environments) { + if (env.id === envId) return i + } + } + throw new Error(`C2D Engine not found by id: ${envId}`) + } + async fetchEnvironments( - chainId: number, + chainId?: number, engine?: C2DEngine ): Promise { /** diff --git a/src/components/c2d/index.ts b/src/components/c2d/index.ts index b2215c8fa..4742186b4 100644 --- a/src/components/c2d/index.ts +++ b/src/components/c2d/index.ts @@ -2,8 +2,16 @@ import { OceanNode } from '../../OceanNode.js' import { getConfiguration } from '../../utils/config.js' import { ComputeGetEnvironmentsHandler } from '../core/compute/index.js' import { PROTOCOL_COMMANDS } from '../../utils/constants.js' -import { streamToObject } from '../../utils/util.js' +import { + deleteKeysFromObject, + sanitizeServiceFiles, + streamToObject +} from '../../utils/util.js' import { Readable } from 'stream' +import { decrypt } from '../../utils/crypt.js' +import { BaseFileObject, EncryptMethod } from '../../@types/fileObject.js' +import { CORE_LOGGER } from '../../utils/logging/common.js' +import { ComputeJob, DBComputeJob } from '../../@types/index.js' export { C2DEngine } from './compute_engine_base.js' @@ -32,3 +40,42 @@ export async function checkC2DEnvExists( } return false } + +export async function decryptFilesObject( + serviceFiles: any +): Promise { + try { + // 2. 
Decrypt the url + const decryptedUrlBytes = await decrypt( + Uint8Array.from(Buffer.from(sanitizeServiceFiles(serviceFiles), 'hex')), + EncryptMethod.ECIES + ) + + // 3. Convert the decrypted bytes back to a string + const decryptedFilesString = Buffer.from(decryptedUrlBytes).toString() + const decryptedFileArray = JSON.parse(decryptedFilesString) + + console.log('decryptedFileArray: ', decryptedFileArray) + return decryptedFileArray.files[0] + } catch (err) { + CORE_LOGGER.error('Error decrypting files object: ' + err.message) + return null + } +} + +export function omitDBComputeFieldsFromComputeJob(dbCompute: DBComputeJob): ComputeJob { + const job: ComputeJob = deleteKeysFromObject(dbCompute, [ + 'clusterHash', + 'configlogURL', + 'publishlogURL', + 'algologURL', + 'outputsURL', + 'stopRequested', + 'algorithm', + 'assets', + 'isRunning', + 'isStarted', + 'containerImage' + ]) as ComputeJob + return job +} diff --git a/src/components/core/compute/environments.ts b/src/components/core/compute/environments.ts index 8561d66c4..a07047e45 100644 --- a/src/components/core/compute/environments.ts +++ b/src/components/core/compute/environments.ts @@ -1,6 +1,6 @@ import { Readable } from 'stream' import { P2PCommandResponse } from '../../../@types/index.js' -import { ComputeEnvByChain } from '../../../@types/C2D.js' +import { ComputeEnvByChain } from '../../../@types/C2D/C2D.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { Handler } from '../handler/handler.js' import { ComputeGetEnvironmentsCommand } from '../../../@types/commands.js' diff --git a/src/components/core/compute/getStatus.ts b/src/components/core/compute/getStatus.ts index 23b330298..faf4a6d6a 100644 --- a/src/components/core/compute/getStatus.ts +++ b/src/components/core/compute/getStatus.ts @@ -1,6 +1,6 @@ import { Readable } from 'stream' import { P2PCommandResponse } from '../../../@types/index.js' -import { ComputeJob } from '../../../@types/C2D.js' +import { ComputeJob } from '../../../@types/C2D/C2D.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { Handler } from '../handler/handler.js' import { ComputeGetStatusCommand } from '../../../@types/commands.js' @@ -44,9 +44,13 @@ export class ComputeGetStatusHandler extends Handler { // split jobId (which is already in hash-jobId format) and get the hash // then get jobId which might contain dashes as well const index = task.jobId.indexOf('-') - const hash = task.jobId.slice(0, index) - engines = [await this.getOceanNode().getC2DEngines().getC2DByHash(hash)] - jobId = task.jobId.slice(index + 1) + if (index > 0) { + const hash = task.jobId.slice(0, index) + engines = [await this.getOceanNode().getC2DEngines().getC2DByHash(hash)] + jobId = task.jobId.slice(index + 1) + } else { + engines = await this.getOceanNode().getC2DEngines().getAllEngines() + } } else { engines = await this.getOceanNode().getC2DEngines().getAllEngines() } @@ -57,7 +61,8 @@ export class ComputeGetStatusHandler extends Handler { task.agreementId, jobId ) - response.push(...jobs) + + if (jobs && jobs.length > 0) response.push(...jobs) } CORE_LOGGER.logMessage( 'ComputeGetStatusCommand Response: ' + JSON.stringify(response, null, 2), diff --git a/src/components/core/compute/getStreamableLogs.ts b/src/components/core/compute/getStreamableLogs.ts new file mode 100644 index 000000000..9b46e6ad2 --- /dev/null +++ b/src/components/core/compute/getStreamableLogs.ts @@ -0,0 +1,88 @@ +import { P2PCommandResponse } from '../../../@types/index.js' +import { CORE_LOGGER } 
from '../../../utils/logging/common.js' +import { Handler } from '../handler/handler.js' +import { ComputeGetStreamableLogsCommand } from '../../../@types/commands.js' +// import { checkNonce, NonceResponse } from '../utils/nonceHandler.js' +import { Stream } from 'stream' +import { + buildInvalidRequestMessage, + validateCommandParameters, + ValidateParams +} from '../../httpRoutes/validateCommands.js' +import { isAddress } from 'ethers' + +export class ComputeGetStreamableLogsHandler extends Handler { + validate(command: ComputeGetStreamableLogsCommand): ValidateParams { + const validation = validateCommandParameters(command, [ + 'consumerAddress', + 'signature', + 'nonce', + 'jobId' + ]) + if (validation.valid) { + if (command.consumerAddress && !isAddress(command.consumerAddress)) { + return buildInvalidRequestMessage( + 'Parameter : "consumerAddress" is not a valid web3 address' + ) + } + } + return validation + } + + async handle(task: ComputeGetStreamableLogsCommand): Promise { + const validationResponse = await this.verifyParamsAndRateLimits(task) + if (this.shouldDenyTaskHandling(validationResponse)) { + return validationResponse + } + + // TO DO: signature message to check + + // split jobId (which is already in hash-jobId format) and get the hash + // then get jobId which might contain dashes as well + const index = task.jobId.indexOf('-') + const hash = task.jobId.slice(0, index) + const jobId = task.jobId.slice(index + 1) + + // env might contain + let engine + try { + engine = await this.getOceanNode().getC2DEngines().getC2DByHash(hash) + } catch (e) { + return { + stream: null, + status: { + httpStatus: 500, + error: 'Invalid C2D Environment' + } + } + } + try { + const respStream = await engine.getStreamableLogs(jobId) + if (!respStream) { + return { + stream: null, + status: { + httpStatus: 404 + } + } + } + const response: P2PCommandResponse = { + stream: respStream as unknown as Stream, + status: { + httpStatus: 200 + } + } + + return response + } catch (error) { + CORE_LOGGER.error(error.message) + return { + stream: null, + status: { + httpStatus: 500, + error: error.message + } + } + } + } +} diff --git a/src/components/core/compute/index.ts b/src/components/core/compute/index.ts index 14d73f5cc..2beb8c621 100644 --- a/src/components/core/compute/index.ts +++ b/src/components/core/compute/index.ts @@ -4,3 +4,4 @@ export * from './stopCompute.js' export * from './getStatus.js' export * from './getResults.js' export * from './initialize.js' +export * from './getStreamableLogs.js' diff --git a/src/components/core/compute/initialize.ts b/src/components/core/compute/initialize.ts index 1c5f58bec..1b07a4882 100644 --- a/src/components/core/compute/initialize.ts +++ b/src/components/core/compute/initialize.ts @@ -1,5 +1,6 @@ import { Readable } from 'stream' -import { P2PCommandResponse } from '../../../@types/index.js' +import { P2PCommandResponse } from '../../../@types/OceanNode.js' +import { C2DClusterType } from '../../../@types/C2D/C2D.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { Handler } from '../handler/handler.js' import { ComputeInitializeCommand } from '../../../@types/commands.js' @@ -26,6 +27,8 @@ import { getConfiguration } from '../../../utils/index.js' import { sanitizeServiceFiles } from '../../../utils/util.js' import { FindDdoHandler } from '../handler/ddoHandler.js' import { isOrderingAllowedForAsset } from '../handler/downloadHandler.js' +import { getNonceAsNumber } from '../utils/nonceHandler.js' +import { 
C2DEngineDocker, getAlgorithmImage } from '../../c2d/compute_engine_docker.js' export class ComputeInitializeHandler extends Handler { validate(command: ComputeInitializeCommand): ValidateParams { const validation = validateCommandParameters(command, [ @@ -33,6 +36,7 @@ export class ComputeInitializeHandler extends Handler { 'algorithm', 'compute', 'consumerAddress' + // we might also need a "signature" (did + nonce) for confidential evm template 4 ]) if (validation.valid) { if (command.consumerAddress && !isAddress(command.consumerAddress)) { @@ -125,6 +129,37 @@ export class ComputeInitializeHandler extends Handler { } } + // docker images? + const clusters = config.c2dClusters + let hasDockerImages = false + for (const cluster of clusters) { + if (cluster.type === C2DClusterType.DOCKER) { + hasDockerImages = true + break + } + } + if (hasDockerImages) { + const algoImage = getAlgorithmImage(task.algorithm) + if (algoImage) { + const env = await this.getOceanNode() + .getC2DEngines() + .getExactComputeEnv(task.compute.env, ddo.chainId) + const validation: ValidateParams = await C2DEngineDocker.checkDockerImage( + algoImage, + env.platform && env.platform.length > 0 ? env.platform[0] : null + ) + if (!validation.valid) { + return { + stream: null, + status: { + httpStatus: validation.status, + error: `Initialize Compute failed for image ${algoImage} :${validation.reason}` + } + } + } + } + } + const signer = blockchain.getSigner() // check if oasis evm or similar @@ -144,19 +179,34 @@ export class ComputeInitializeHandler extends Handler { service.datatokenAddress, signer ) - if (isTemplate4 && (await isERC20Template4Active(ddo.chainId, signer))) { - // call smart contract to decrypt - const serviceIndex = AssetUtils.getServiceIndexById(ddo, service.id) - const filesObject = await getFilesObjectFromConfidentialEVM( - serviceIndex, - service.datatokenAddress, - signer, - task.consumerAddress, - null, // TODO, we will need to have a signature verification - ddo.id - ) - if (filesObject !== null) { - canDecrypt = true + if (isTemplate4) { + if (!task.signature) { + CORE_LOGGER.error( + 'Could not decrypt ddo files on template 4, missing consumer signature!' + ) + } else if (await isERC20Template4Active(ddo.chainId, signer)) { + // we need to get the proper data for the signature + const consumeData = + task.consumerAddress + + task.datasets[0].documentId + + getNonceAsNumber(task.consumerAddress) + // call smart contract to decrypt + const serviceIndex = AssetUtils.getServiceIndexById(ddo, service.id) + const filesObject = await getFilesObjectFromConfidentialEVM( + serviceIndex, + service.datatokenAddress, + signer, + task.consumerAddress, + task.signature, // we will need to have a signature verification + consumeData + ) + if (filesObject !== null) { + canDecrypt = true + } + } else { + CORE_LOGGER.error( + 'Could not decrypt ddo files on template 4, template is not active!' 
+ ) } } } diff --git a/src/components/core/compute/startCompute.ts b/src/components/core/compute/startCompute.ts index 7d10ba894..d1065a0dc 100644 --- a/src/components/core/compute/startCompute.ts +++ b/src/components/core/compute/startCompute.ts @@ -1,9 +1,8 @@ import { Readable } from 'stream' import { P2PCommandResponse } from '../../../@types/index.js' -import { ComputeAsset } from '../../../@types/C2D.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { Handler } from '../handler/handler.js' -import { ComputeStartCommand } from '../../../@types/commands.js' +import { ComputeStartCommand, FreeComputeStartCommand } from '../../../@types/commands.js' import { getAlgoChecksums, validateAlgoForDataset } from './utils.js' import { ValidateParams, @@ -28,6 +27,7 @@ import { sanitizeServiceFiles } from '../../../utils/util.js' import { FindDdoHandler } from '../handler/ddoHandler.js' import { ProviderFeeValidation } from '../../../@types/Fees.js' import { isOrderingAllowedForAsset } from '../handler/downloadHandler.js' +import { getNonceAsNumber } from '../utils/nonceHandler.js' export class ComputeStartHandler extends Handler { validate(command: ComputeStartCommand): ValidateParams { const commandValidation = validateCommandParameters(command, [ @@ -36,7 +36,7 @@ export class ComputeStartHandler extends Handler { 'nonce', 'environment', 'algorithm', - 'dataset' + 'datasets' ]) if (commandValidation.valid) { if (!isAddress(command.consumerAddress)) { @@ -72,8 +72,6 @@ export class ComputeStartHandler extends Handler { } } const node = this.getOceanNode() - const assets: ComputeAsset[] = [task.dataset] - if (task.additionalDatasets) assets.push(...task.additionalDatasets) const { algorithm } = task let foundValidCompute = null @@ -93,7 +91,8 @@ export class ComputeStartHandler extends Handler { } } // check algo - for (const elem of [...[task.algorithm], ...assets]) { + for (const elem of [...[task.algorithm], ...task.datasets]) { + console.log(elem) const result: any = { validOrder: false } if ('documentId' in elem && elem.documentId) { result.did = elem.documentId @@ -166,6 +165,11 @@ export class ComputeStartHandler extends Handler { signer ) if (isTemplate4 && (await isERC20Template4Active(ddo.chainId, signer))) { + // we need to get the proper data for the signature + const consumeData = + task.consumerAddress + + task.datasets[0].documentId + + getNonceAsNumber(task.consumerAddress) // call smart contract to decrypt const serviceIndex = AssetUtils.getServiceIndexById(ddo, service.id) const filesObject = await getFilesObjectFromConfidentialEVM( @@ -173,8 +177,8 @@ export class ComputeStartHandler extends Handler { service.datatokenAddress, signer, task.consumerAddress, - task.signature, // TODO, we will need to have a signature verification - ddo.id + task.signature, // we will need to have a signature verification + consumeData ) if (filesObject != null) { canDecrypt = true @@ -219,6 +223,16 @@ export class ComputeStartHandler extends Handler { result.chainId = ddo.chainId const env = await engine.getComputeEnvironment(ddo.chainId, task.environment) + if (env.free) { + const error = `Free Jobs cannot be started here, use startFreeCompute` + return { + stream: null, + status: { + httpStatus: 500, + error + } + } + } if (!('transferTxId' in elem) || !elem.transferTxId) { const error = `Missing transferTxId for DDO ${elem.documentId}` return { @@ -312,11 +326,11 @@ export class ComputeStartHandler extends Handler { const { validUntil } = foundValidCompute const 
response = await engine.startComputeJob( - assets, + task.datasets, algorithm, task.output, - task.consumerAddress, envId, + task.consumerAddress, validUntil, chainId, agreementId @@ -345,3 +359,81 @@ export class ComputeStartHandler extends Handler { } } } + +// free compute +// - has no validation +export class FreeComputeStartHandler extends Handler { + validate(command: ComputeStartCommand): ValidateParams { + const commandValidation = validateCommandParameters(command, [ + 'algorithm', + 'datasets', + 'consumerAddress', + 'signature', + 'nonce' + ]) + if (commandValidation.valid) { + if (!isAddress(command.consumerAddress)) { + return buildInvalidRequestMessage( + 'Parameter : "consumerAddress" is not a valid web3 address' + ) + } + } + return commandValidation + } + + async handle(task: FreeComputeStartCommand): Promise { + const validationResponse = await this.verifyParamsAndRateLimits(task) + if (this.shouldDenyTaskHandling(validationResponse)) { + return validationResponse + } + let environment = null + try { + // get all envs and see if we have a free one + const allEnvs = await this.getOceanNode().getC2DEngines().fetchEnvironments() + for (const env of allEnvs) { + if (env.free) { + environment = env + } + } + if (!environment) + return { + stream: null, + status: { + httpStatus: 500, + error: 'This node does not have a free compute env' + } + } + const engine = await this.getOceanNode() + .getC2DEngines() + .getC2DByEnvId(environment.id) + const response = await engine.startComputeJob( + task.datasets, + task.algorithm, + task.output, + environment.id, + task.consumerAddress + ) + + CORE_LOGGER.logMessage( + 'FreeComputeStartCommand Response: ' + JSON.stringify(response, null, 2), + true + ) + + return { + stream: Readable.from(JSON.stringify(response)), + status: { + httpStatus: 200 + } + } + } catch (error) { + CORE_LOGGER.error(error.message) + return { + stream: null, + status: { + httpStatus: 500, + error: error.message + } + } + } + } +} diff --git a/src/components/core/compute/utils.ts b/src/components/core/compute/utils.ts index f4e60b95b..b5165046c 100644 --- a/src/components/core/compute/utils.ts +++ b/src/components/core/compute/utils.ts @@ -1,5 +1,5 @@ import { OceanNode } from '../../../OceanNode.js' -import { AlgoChecksums } from '../../../@types/C2D.js' +import { AlgoChecksums } from '../../../@types/C2D/C2D.js' import { ArweaveFileObject, IpfsFileObject, diff --git a/src/components/core/handler/coreHandlersRegistry.ts b/src/components/core/handler/coreHandlersRegistry.ts index 82f0a3540..2fbc304ae 100644 --- a/src/components/core/handler/coreHandlersRegistry.ts +++ b/src/components/core/handler/coreHandlersRegistry.ts @@ -24,10 +24,12 @@ import { Command } from '../../../@types/commands.js' import { ComputeGetEnvironmentsHandler, ComputeStartHandler, + FreeComputeStartHandler, ComputeStopHandler, ComputeGetStatusHandler, ComputeGetResultHandler, - ComputeInitializeHandler + ComputeInitializeHandler, + ComputeGetStreamableLogsHandler } from '../compute/index.js' import { StopNodeHandler } from '../admin/stopNodeHandler.js' import { ReindexTxHandler } from '../admin/reindexTxHandler.js' @@ -101,6 +103,10 @@ export class CoreHandlersRegistry { PROTOCOL_COMMANDS.COMPUTE_START, new ComputeStartHandler(node) ) + this.registerCoreHandler( + PROTOCOL_COMMANDS.FREE_COMPUTE_START, + new FreeComputeStartHandler(node) + ) this.registerCoreHandler(PROTOCOL_COMMANDS.COMPUTE_STOP, new ComputeStopHandler(node)) this.registerCoreHandler( PROTOCOL_COMMANDS.COMPUTE_GET_STATUS, @@ 
-110,6 +116,10 @@ export class CoreHandlersRegistry { PROTOCOL_COMMANDS.COMPUTE_GET_RESULT, new ComputeGetResultHandler(node) ) + this.registerCoreHandler( + PROTOCOL_COMMANDS.COMPUTE_GET_STREAMABLE_LOGS, + new ComputeGetStreamableLogsHandler(node) + ) this.registerCoreHandler( PROTOCOL_COMMANDS.COMPUTE_INITIALIZE, new ComputeInitializeHandler(node) diff --git a/src/components/core/handler/statusHandler.ts b/src/components/core/handler/statusHandler.ts index fe61963ec..c314dd924 100644 --- a/src/components/core/handler/statusHandler.ts +++ b/src/components/core/handler/statusHandler.ts @@ -14,16 +14,13 @@ export class StatusHandler extends Handler { return validateCommandParameters(command, []) } - async handle( - task: StatusCommand, - detailed: boolean = false - ): Promise { + async handle(task: StatusCommand): Promise { const checks = await this.verifyParamsAndRateLimits(task) if (checks.status.httpStatus !== 200 || checks.status.error !== null) { return checks } try { - const statusResult = await status(this.getOceanNode(), task.node, detailed) + const statusResult = await status(this.getOceanNode(), task.node, !!task.detailed) if (!statusResult) { return { stream: null, @@ -50,6 +47,7 @@ export class DetailedStatusHandler extends StatusHandler { } async handle(task: StatusCommand): Promise { - return await super.handle(task, true) + task.detailed = true + return await super.handle(task) } } diff --git a/src/components/core/utils/feesHandler.ts b/src/components/core/utils/feesHandler.ts index 39ffa4378..ba5a4f188 100644 --- a/src/components/core/utils/feesHandler.ts +++ b/src/components/core/utils/feesHandler.ts @@ -1,4 +1,4 @@ -import type { ComputeEnvironment } from '../../../@types/C2D.js' +import type { ComputeEnvironment } from '../../../@types/C2D/C2D.js' import { JsonRpcApiProvider, ethers, diff --git a/src/components/core/utils/nonceHandler.ts b/src/components/core/utils/nonceHandler.ts index bc6fa494f..60e6ef80b 100644 --- a/src/components/core/utils/nonceHandler.ts +++ b/src/components/core/utils/nonceHandler.ts @@ -4,6 +4,12 @@ import { ethers } from 'ethers' import { GENERIC_EMOJIS, LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { DATABASE_LOGGER } from '../../../utils/logging/common.js' import { AbstractNonceDatabase } from '../../database/BaseDatabase.js' +import { CoreHandlersRegistry } from '../handler/coreHandlersRegistry.js' +import { OceanNode } from '../../../OceanNode.js' +import { PROTOCOL_COMMANDS } from '../../../utils/constants.js' +import { NonceCommand } from '../../../@types/commands.js' +import { streamToString } from '../../../utils/util.js' +import { Readable } from 'node:stream' export function getDefaultErrorResponse(errorMessage: string): P2PCommandResponse { return { @@ -32,6 +38,18 @@ export type NonceResponse = { error?: string } +// we are doing the nonce stream response transformation in a few places +// so we can use this shortcut function when we just want the final number +export async function getNonceAsNumber(address: string): Promise { + const command: NonceCommand = { command: PROTOCOL_COMMANDS.NONCE, address } + const nonceResponse = await CoreHandlersRegistry.getInstance(OceanNode.getInstance()) + .getHandlerForTask(command) + .handle(command) + if (nonceResponse.stream) { + return await Number(streamToString(nonceResponse.stream as Readable)) + } + return 0 +} // get stored nonce for an address ( 0 if not found) export async function getNonce( db: AbstractNonceDatabase, diff --git 
a/src/components/core/utils/statusHandler.ts b/src/components/core/utils/statusHandler.ts index a7105442e..9b7063ccb 100644 --- a/src/components/core/utils/statusHandler.ts +++ b/src/components/core/utils/statusHandler.ts @@ -151,7 +151,16 @@ export async function status( // depends on request if (detailed) { - nodeStatus.c2dClusters = config.c2dClusters + nodeStatus.c2dClusters = [] + const engines = await oceanNode.getC2DEngines().getAllEngines() + for (const engine of engines) { + const type = await engine.getC2DType() + nodeStatus.c2dClusters.push({ + type, + hash: await engine.getC2DConfig().hash, + environments: await engine.getComputeEnvironments() + }) + } nodeStatus.supportedSchemas = typesenseSchemas.ddoSchemas } return nodeStatus diff --git a/src/components/database/BaseDatabase.ts b/src/components/database/BaseDatabase.ts index f02153415..664ec0c64 100644 --- a/src/components/database/BaseDatabase.ts +++ b/src/components/database/BaseDatabase.ts @@ -5,7 +5,7 @@ import { DATABASE_LOGGER } from '../../utils/logging/common.js' import { ElasticsearchSchema } from './ElasticSchemas.js' import { TypesenseSchema } from './TypesenseSchemas.js' -export abstract class AbstractNonceDatabase { +export abstract class AbstractDatabase { protected config: OceanNodeDBConfig protected schema: TypesenseSchema @@ -13,7 +13,8 @@ export abstract class AbstractNonceDatabase { this.config = config this.schema = schema } - +} +export abstract class AbstractNonceDatabase extends AbstractDatabase { abstract create(address: string, nonce: number): Promise abstract retrieve(address: string): Promise abstract update(address: string, nonce: number): Promise @@ -30,30 +31,14 @@ export abstract class AbstractNonceDatabase { } } -export abstract class AbstractIndexerDatabase { - protected config: OceanNodeDBConfig - protected schema: TypesenseSchema - - constructor(config: OceanNodeDBConfig, schema?: TypesenseSchema) { - this.config = config - this.schema = schema - } - +export abstract class AbstractIndexerDatabase extends AbstractDatabase { abstract create(network: number, lastIndexedBlock: number): Promise abstract retrieve(network: number): Promise abstract update(network: number, lastIndexedBlock: number): Promise abstract delete(network: number): Promise } -export abstract class AbstractLogDatabase { - protected config: OceanNodeDBConfig - protected schema: TypesenseSchema - - constructor(config: OceanNodeDBConfig, schema?: TypesenseSchema) { - this.config = config - this.schema = schema - } - +export abstract class AbstractLogDatabase extends AbstractDatabase { abstract insertLog(logEntry: Record): Promise abstract retrieveLog(id: string): Promise | null> abstract retrieveMultipleLogs( @@ -70,15 +55,7 @@ export abstract class AbstractLogDatabase { abstract getLogsCount(): Promise } -export abstract class AbstractDdoStateDatabase { - protected config: OceanNodeDBConfig - protected schema: TypesenseSchema - - constructor(config: OceanNodeDBConfig, schema?: TypesenseSchema) { - this.config = config - this.schema = schema - } - +export abstract class AbstractDdoStateDatabase extends AbstractDatabase { abstract create( chainId: number, did: string, diff --git a/src/components/database/C2DDatabase.ts b/src/components/database/C2DDatabase.ts new file mode 100644 index 000000000..a0dc32b5f --- /dev/null +++ b/src/components/database/C2DDatabase.ts @@ -0,0 +1,131 @@ +import path from 'path' +import fs from 'fs' +import { ComputeEnvironment, DBComputeJob } from '../../@types/C2D/C2D.js' +import { SQLiteCompute } 
from './sqliteCompute.js'
+import { DATABASE_LOGGER } from '../../utils/logging/common.js'
+import { OceanNodeDBConfig } from '../../@types/OceanNode.js'
+import { TypesenseSchema } from './TypesenseSchemas.js'
+import { AbstractDatabase } from './BaseDatabase.js'
+import { OceanNode } from '../../OceanNode.js'
+import { getDatabase } from '../../utils/database.js'
+
+export class C2DDatabase extends AbstractDatabase {
+  private provider: SQLiteCompute
+
+  constructor(config: OceanNodeDBConfig, schema: TypesenseSchema) {
+    super(config, schema)
+    return (async (): Promise<C2DDatabase> => {
+      // Fall back to SQLite
+      DATABASE_LOGGER.info('Creating C2DDatabase with SQLite')
+
+      // Ensure the directory exists before instantiating SQLiteProvider
+      const dbDir = path.dirname('databases/c2dDatabase.sqlite')
+      if (!fs.existsSync(dbDir)) {
+        fs.mkdirSync(dbDir, { recursive: true })
+      }
+      this.provider = new SQLiteCompute('databases/c2dDatabase.sqlite')
+      await this.provider.createTable()
+
+      return this
+    })() as unknown as C2DDatabase
+  }
+
+  async newJob(job: DBComputeJob): Promise<string> {
+    const jobId = await this.provider.newJob(job)
+    return jobId
+  }
+
+  async getJob(
+    jobId?: string,
+    agreementId?: string,
+    owner?: string
+  ): Promise<DBComputeJob[]> {
+    const jobs = await this.provider.getJob(jobId, agreementId, owner)
+    return jobs
+  }
+
+  async updateJob(job: DBComputeJob): Promise<number> {
+    let updated = 0
+    const previouslySaved: DBComputeJob[] = await this.getJob(job.jobId)
+    if (previouslySaved.length === 1) {
+      previouslySaved[0] = job
+      updated = await this.provider.updateJob(previouslySaved[0])
+      if (!updated) {
+        DATABASE_LOGGER.error(`Unable to update job: ${job.jobId}. No rows affected!`)
+      }
+    } else {
+      DATABASE_LOGGER.error(
+        `Unable to update job: ${job.jobId}. It seems this jobID does not exist!`
+      )
+    }
+    return updated
+  }
+
+  async getRunningJobs(engine?: string, environment?: string): Promise<DBComputeJob[]> {
+    return await this.provider.getRunningJobs(engine, environment)
+  }
+
+  async deleteJob(jobId: string): Promise<boolean> {
+    return await this.provider.deleteJob(jobId)
+  }
+
+  /**
+   * All compute engines have compute environments,
+   * and each compute environment specifies how long the output produced by
+   * a job is held by the node before being deleted.
+   * When a job's expiry is overdue, the node deletes all storage used by that job,
+   * and also deletes the job record from the database.
+   * @returns number of cleaned jobs
+   */
+  async cleanStorageExpiredJobs(): Promise<number> {
+    const allEngines = await OceanNode.getInstance(await getDatabase()).getC2DEngines()
+      .engines
+
+    let cleaned = 0
+    for (const engine of allEngines) {
+      const allEnvironments = await engine.getComputeEnvironments()
+      for (const computeEnvironment of allEnvironments) {
+        const finishedOrExpired: DBComputeJob[] =
+          await this.provider.getFinishedJobs(computeEnvironment)
+        for (const job of finishedOrExpired) {
+          if (
+            computeEnvironment &&
+            computeEnvironment.storageExpiry > Date.now() / 1000
+          ) {
+            if (await engine.cleanupExpiredStorage(job)) {
+              cleaned++
+            }
+          }
+        }
+      }
+      cleaned += await this.cleanOrphanJobs(allEnvironments)
+    }
+    return cleaned
+  }
+
+  /**
+   * Clean orphan jobs:
+   * jobs left in the DB whose compute environment no longer exists
+   * @param existingEnvironments
+   * @returns number of orphans
+   */
+  async cleanOrphanJobs(existingEnvironments: ComputeEnvironment[]) {
+    const c2dDatabase = await (await getDatabase()).c2d
+    const finishedOrExpired: DBComputeJob[] = await this.provider.getFinishedJobs()
+    const envIds: string[] = existingEnvironments.map((env) => {
+      return env.id
+    })
+    let cleaned = 0
+    for (const job of finishedOrExpired) {
+      if (job.environment && !envIds.includes(job.environment)) {
+        if (await c2dDatabase.deleteJob(job.jobId)) {
+          cleaned++
+        }
+      }
+    }
+    DATABASE_LOGGER.info('Cleaned ' + cleaned + ' orphan C2D jobs')
+    return cleaned
+  }
+}
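Note: `C2DDatabase` relies on an async-constructor pattern: the constructor returns an async IIFE cast to the class type, so `new C2DDatabase(...)` actually yields a Promise that must be awaited (which `DatabaseFactory.createC2DDatabase()` below does). A usage sketch under that assumption; `config` and `job` are placeholders:

```ts
// Minimal sketch of the C2DDatabase API defined above.
const c2d = await new C2DDatabase(config, typesenseSchemas.c2dSchemas)
const jobId = await c2d.newJob(job) // job: DBComputeJob
const [stored] = await c2d.getJob(jobId) // can also narrow by agreementId and/or owner
stored.dateFinished = String(Date.now() / 1000)
await c2d.updateJob(stored) // resolves to the number of affected rows
```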
diff --git a/src/components/database/DatabaseFactory.ts b/src/components/database/DatabaseFactory.ts
index fc1a85b56..870df32d6 100644
--- a/src/components/database/DatabaseFactory.ts
+++ b/src/components/database/DatabaseFactory.ts
@@ -29,6 +29,7 @@ import { TypesenseMetadataQuery } from './TypesenseMetadataQuery.js'
 import { IMetadataQuery } from '../../@types/DDO/IMetadataQuery.js'
 import { ElasticSearchMetadataQuery } from './ElasticSearchMetadataQuery.js'
 import { DB_TYPES } from '../../utils/index.js'
+import { C2DDatabase } from './C2DDatabase.js'
 import { SQLLiteNonceDatabase } from './SQLLiteNonceDatabase.js'

 export class DatabaseFactory {
@@ -85,6 +86,10 @@ export class DatabaseFactory {
     return this.createDatabase('ddo', config)
   }

+  static async createC2DDatabase(config: OceanNodeDBConfig): Promise<C2DDatabase> {
+    return await new C2DDatabase(config, typesenseSchemas.c2dSchemas)
+  }
+
   static createIndexerDatabase(
     config: OceanNodeDBConfig
   ): Promise<AbstractIndexerDatabase> {
diff --git a/src/components/database/TypesenseSchemas.ts b/src/components/database/TypesenseSchemas.ts
index 9b17ffcf6..0cdf7ea5e 100644
--- a/src/components/database/TypesenseSchemas.ts
+++ b/src/components/database/TypesenseSchemas.ts
@@ -48,6 +48,7 @@ export type TypesenseSchema = TypesenseCollectionCreateSchema
 export type TypesenseSchemas = {
   ddoSchemas: TypesenseSchema[]
   nonceSchemas: TypesenseSchema
+  c2dSchemas: TypesenseSchema
   indexerSchemas: TypesenseSchema
   logSchemas: TypesenseSchema
   orderSchema: TypesenseSchema
@@ -61,6 +62,24 @@ export const typesenseSchemas: TypesenseSchemas = {
     enable_nested_fields: true,
     fields: [{ name: 'nonce', type: 'int64' }]
   },
+  c2dSchemas: {
+    name: 'c2djobs',
+    enable_nested_fields: true,
+    fields: [
+      // not really needed because it will be SQLite
+      { name: 'clusterHash', type: 'string', optional: false },
+      { name: 'configlogURL', type: 'string', optional: false },
+      { name: 'publishlogURL', type: 'string', optional: false },
+      { name: 'algologURL', type: 'string', optional: false },
+      { name: 'outputsURL', type: 'auto', optional: false },
+      { name: 'stopRequested', type: 'bool', optional: false },
+      { name: 'algorithm', type: 'auto', optional: false },
+      { name: 'assets', type: 'auto', optional: false },
+      { name: 'isRunning', type: 'bool', optional: false },
+      { name: 'isStarted', type: 'bool', optional: false },
+      { name: 'containerImage', type: 'string', optional: false }
+    ]
+  },
   indexerSchemas: {
     name: 'indexer',
     enable_nested_fields: true,
diff --git a/src/components/database/index.ts b/src/components/database/index.ts
index cd5e9e430..0684f150b 100644
--- a/src/components/database/index.ts
+++ b/src/components/database/index.ts
@@ -10,27 +10,32 @@ import {
   AbstractDdoDatabase,
   AbstractDdoStateDatabase,
   AbstractIndexerDatabase,
   AbstractLogDatabase,
-  AbstractNonceDatabase,
   AbstractOrderDatabase
 } from './BaseDatabase.js'
+import { C2DDatabase } from './C2DDatabase.js' import { DatabaseFactory } from './DatabaseFactory.js' import { ElasticsearchSchema } from './ElasticSchemas.js' +import { SQLLiteNonceDatabase } from './SQLLiteNonceDatabase.js' import { TypesenseSchema } from './TypesenseSchemas.js' export type Schema = ElasticsearchSchema | TypesenseSchema export class Database { ddo: AbstractDdoDatabase - nonce: AbstractNonceDatabase + nonce: SQLLiteNonceDatabase indexer: AbstractIndexerDatabase logs: AbstractLogDatabase order: AbstractOrderDatabase ddoState: AbstractDdoStateDatabase + c2d: C2DDatabase constructor(private config: OceanNodeDBConfig) { return (async (): Promise => { try { + // these 2 are using SQL Lite provider this.nonce = await DatabaseFactory.createNonceDatabase(this.config) + this.c2d = await DatabaseFactory.createC2DDatabase(this.config) + // only for Typesense or Elasticsearch if (hasValidDBConfiguration(this.config)) { // add this DB transport too // once we create a DB instance, the logger will be using this transport as well @@ -49,7 +54,7 @@ export class Database { this.ddoState = await DatabaseFactory.createDdoStateDatabase(this.config) } else { DATABASE_LOGGER.info( - 'Invalid URL. Only Nonce Database is initialized. Other databases are not available.' + 'Invalid DB URL. Only Nonce and C2D Databases are initialized. Other databases are not available.' ) } return this diff --git a/src/components/database/sqlite.ts b/src/components/database/sqlite.ts index e2f77de1a..8a17040a4 100644 --- a/src/components/database/sqlite.ts +++ b/src/components/database/sqlite.ts @@ -12,7 +12,7 @@ export class SQLiteProvider implements DatabaseProvider { private db: sqlite3.Database private schema: TypesenseSchema - constructor(private dbFilePath: string) { + constructor(dbFilePath: string) { this.db = new sqlite3.Database(dbFilePath) this.schema = typesenseSchemas.nonceSchemas } diff --git a/src/components/database/sqliteCompute.ts b/src/components/database/sqliteCompute.ts new file mode 100644 index 000000000..f92f7a0f9 --- /dev/null +++ b/src/components/database/sqliteCompute.ts @@ -0,0 +1,339 @@ +import { typesenseSchemas, TypesenseSchema } from './TypesenseSchemas.js' +import { + C2DStatusNumber, + C2DStatusText, + ComputeEnvironment, + type DBComputeJob +} from '../../@types/C2D/C2D.js' +import sqlite3, { RunResult } from 'sqlite3' +import { DATABASE_LOGGER } from '../../utils/logging/common.js' + +interface ComputeDatabaseProvider { + newJob(job: DBComputeJob): Promise + getJob(jobId?: string, agreementId?: string, owner?: string): Promise + updateJob(job: DBComputeJob): Promise + getRunningJobs(engine?: string, environment?: string): Promise + deleteJob(jobId: string): Promise + getFinishedJobs(): Promise +} + +export function generateUniqueID(): string { + return crypto.randomUUID().toString() +} + +function getInternalStructure(job: DBComputeJob): any { + const internalBlob = { + clusterHash: job.clusterHash, + configlogURL: job.configlogURL, + publishlogURL: job.publishlogURL, + algologURL: job.algologURL, + outputsURL: job.outputsURL, + stopRequested: job.stopRequested, + algorithm: job.algorithm, + assets: job.assets, + isRunning: job.isRunning, + isStarted: job.isStarted, + containerImage: job.containerImage + } + return internalBlob +} +export function generateBlobFromJSON(job: DBComputeJob): Buffer { + return Buffer.from(JSON.stringify(getInternalStructure(job))) +} + +export function generateJSONFromBlob(blob: any): Promise { + return JSON.parse(blob.toString()) +} + +// we cannot 
store array of strings, so we use string separators instead +export const STRING_SEPARATOR = '__,__' + +export function convertArrayToString(array: string[]) { + let str: string = '' + for (let i = 0; i < array.length; i++) { + str = str + array[i] + // Do not append comma at the end of last element + if (i < array.length - 1) { + str = str + STRING_SEPARATOR + } + } + return str +} +export function convertStringToArray(str: string) { + const arr: string[] = str.split(STRING_SEPARATOR) + return arr +} + +export class SQLiteCompute implements ComputeDatabaseProvider { + private db: sqlite3.Database + private schema: TypesenseSchema + + constructor(dbFilePath: string) { + this.db = new sqlite3.Database(dbFilePath) + this.schema = typesenseSchemas.c2dSchemas + } + + deleteJob(jobId: string): Promise { + const deleteSQL = ` + DELETE FROM ${this.schema.name} WHERE jobId = ? + ` + return new Promise((resolve, reject) => { + this.db.run(deleteSQL, [jobId], function (this: RunResult, err) { + if (err) reject(err) + else resolve(this.changes === 1) + }) + }) + } + + createTable() { + const createTableSQL = ` + CREATE TABLE IF NOT EXISTS ${this.schema.name} ( + owner TEXT, + did TEXT DEFAULT NULL, + jobId TEXT PRIMARY KEY, + dateCreated TEXT, + dateFinished TEXT DEFAULT NULL, + status INTEGER, + statusText TEXT, + results BLOB, + inputDID TEXT DEFAULT NULL, + algoDID TEXT DEFAULT NULL, + agreementId TEXT DEFAULT NULL, + expireTimestamp INTEGER, + environment TEXT DEFAULT NULL, + body BLOB + ); + ` + return new Promise((resolve, reject) => { + this.db.run(createTableSQL, (err) => { + if (err) reject(err) + else resolve() + }) + }) + } + + newJob(job: DBComputeJob): Promise { + // TO DO C2D + const insertSQL = ` + INSERT INTO ${this.schema.name} + ( + owner, + did, + jobId, + dateCreated, + status, + statusText, + inputDID, + algoDID, + agreementId, + expireTimestamp, + environment, + body + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); + ` + const jobId = job.jobId || generateUniqueID() + job.jobId = jobId + return new Promise((resolve, reject) => { + this.db.run( + insertSQL, + [ + job.owner, + job.did, + jobId, + job.dateCreated || String(Date.now() / 1000), // seconds from epoch, + job.status || C2DStatusNumber.JobStarted, + job.statusText || C2DStatusText.JobStarted, + job.inputDID ? convertArrayToString(job.inputDID) : job.inputDID, + job.algoDID, + job.agreementId, + job.expireTimestamp, + job.environment, + generateBlobFromJSON(job) + ], + (err) => { + if (err) { + DATABASE_LOGGER.error('Could not insert C2D job on DB: ' + err.message) + reject(err) + } else { + DATABASE_LOGGER.info('Successfully inserted job with id:' + jobId) + resolve(jobId) + } + } + ) + }) + } + + /** + * on a get status for instance, all params are optional + * but at least one is required... In case we don't have a jobId, + * we have multiple results (by owner for instance) + * So, it refines the query or we can have more than 1 result (same as current implementation) + * @param jobId the job identifier + * @param agreementId the agreement identifier (did ?) 
+ * @param owner the consumer address / job owner + * @returns job(s) + */ + getJob(jobId?: string, agreementId?: string, owner?: string): Promise { + const params: any = [] + let selectSQL = `SELECT * FROM ${this.schema.name} WHERE 1=1` + if (jobId) { + selectSQL += ` AND jobId = ?` + params.push(jobId) + } + if (agreementId) { + if (!agreementId.startsWith('0x')) { + agreementId = '0x' + agreementId + } + selectSQL += ` AND agreementId = ?` + params.push(agreementId) + } + if (owner) { + selectSQL += ` AND owner = ?` + params.push(owner) + } + + return new Promise((resolve, reject) => { + this.db.all(selectSQL, params, (err, rows: any[] | undefined) => { + if (err) { + DATABASE_LOGGER.error(err.message) + reject(err) + } else { + // also decode the internal data into job data + if (rows && rows.length > 0) { + const all: DBComputeJob[] = rows.map((row) => { + const body = generateJSONFromBlob(row.body) + delete row.body + const job: DBComputeJob = { ...row, ...body } + return job + }) + resolve(all) + } else { + DATABASE_LOGGER.error( + `Could not find any job with jobId: ${jobId}, agreementId: ${agreementId}, or owner: ${owner} in database!` + ) + resolve([]) + } + } + }) + }) + } + + updateJob(job: DBComputeJob): Promise { + if (job.dateFinished && job.isRunning) { + job.isRunning = false + } + // TO DO C2D + const data: any[] = [ + job.owner, + job.status, + job.statusText, + job.expireTimestamp, + generateBlobFromJSON(job), + job.jobId + ] + const updateSQL = ` + UPDATE ${this.schema.name} + SET + owner = ?, + status = ?, + statusText = ?, + expireTimestamp = ?, + body = ? + WHERE jobId = ?; + ` + return new Promise((resolve, reject) => { + this.db.run(updateSQL, data, function (this: RunResult, err: Error | null) { + if (err) { + DATABASE_LOGGER.error(`Error while updating job: ${err.message}`) + reject(err) + } else { + // number of rows updated successfully + resolve(this.changes) + } + }) + }) + } + + getRunningJobs(engine?: string, environment?: string): Promise { + const selectSQL = ` + SELECT * FROM ${this.schema.name} WHERE dateFinished IS NULL + ` + return new Promise((resolve, reject) => { + this.db.all(selectSQL, (err, rows: any[] | undefined) => { + if (err) { + DATABASE_LOGGER.error(err.message) + reject(err) + } else { + // also decode the internal data into job data + // get them all running + if (rows && rows.length > 0) { + const all: DBComputeJob[] = rows.map((row) => { + const body = generateJSONFromBlob(row.body) + delete row.body + const job: DBComputeJob = { ...row, ...body } + return job + }) + // filter them out + const filtered = all.filter((job) => { + let include = true + if (engine && engine !== job.clusterHash) { + include = false + } + if (environment && environment !== job.environment) { + include = false + } + if (!job.isRunning) { + include = false + } + return include + }) + resolve(filtered) + } else { + DATABASE_LOGGER.info('Could not find any running C2D jobs!') + resolve([]) + } + } + }) + }) + } + + getFinishedJobs(environment?: ComputeEnvironment): Promise { + // get jobs that already finished (have results), for this environment, and clear storage + job if expired + const selectSQL = ` + SELECT * FROM ${this.schema.name} WHERE environment = ? 
AND (dateFinished IS NOT NULL OR results IS NOT NULL)
+    `
+    return new Promise((resolve, reject) => {
+      this.db.all(selectSQL, [environment.id], (err, rows: any[] | undefined) => {
+        if (err) {
+          DATABASE_LOGGER.error(err.message)
+          reject(err)
+        } else {
+          // also decode the internal data into job data
+          if (rows && rows.length > 0) {
+            const all: DBComputeJob[] = rows.map((row) => {
+              const body = generateJSONFromBlob(row.body)
+              delete row.body
+              const job: DBComputeJob = { ...row, ...body }
+              return job
+            })
+            if (!environment) {
+              resolve(all)
+            }
+            // filter them out
+            const filtered = all.filter((job) => {
+              return environment && environment.id === job.environment
+            })
+            resolve(filtered)
+          } else {
+            DATABASE_LOGGER.info(
+              'Could not find any jobs for the specified environment: ' + environment.id
+            )
+            resolve([])
+          }
+        }
+      })
+    })
+  }
+}
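Note: the provider above stores the queryable job fields as SQLite columns and serializes everything else (cluster hash, log URLs, algorithm, assets, runtime flags) into the `body` BLOB; every read re-hydrates the job by spreading the parsed BLOB back over the row. A condensed sketch of that round trip, reusing the helpers defined above (`hydrate` itself is hypothetical):

```ts
// Write side: plain columns plus a JSON blob of the non-indexed fields
const blob = generateBlobFromJSON(job) // Buffer.from(JSON.stringify(internalFields))

// Read side: rebuild a full DBComputeJob from a fetched row
function hydrate(row: any): DBComputeJob {
  const body = generateJSONFromBlob(row.body) // JSON.parse(blob.toString())
  delete row.body
  return { ...row, ...body }
}
```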
diff --git a/src/components/httpRoutes/compute.ts b/src/components/httpRoutes/compute.ts
index c127981d7..76c95d64c 100644
--- a/src/components/httpRoutes/compute.ts
+++ b/src/components/httpRoutes/compute.ts
@@ -2,17 +2,25 @@ import express from 'express'
 import {
   ComputeGetEnvironmentsHandler,
   ComputeStartHandler,
+  FreeComputeStartHandler,
   ComputeStopHandler,
   ComputeGetStatusHandler,
   ComputeGetResultHandler,
-  ComputeInitializeHandler
+  ComputeInitializeHandler,
+  ComputeGetStreamableLogsHandler
 } from '../core/compute/index.js'
-import type { ComputeAlgorithm, ComputeAsset, ComputeOutput } from '../../@types/C2D.js'
+import type {
+  ComputeAlgorithm,
+  ComputeAsset,
+  ComputeOutput
+} from '../../@types/C2D/C2D.js'
 import type {
   ComputeStartCommand,
+  FreeComputeStartCommand,
   ComputeStopCommand,
   ComputeGetResultCommand,
-  ComputeGetStatusCommand
+  ComputeGetStatusCommand,
+  ComputeGetStreamableLogsCommand
 } from '../../@types/commands.js'
 import { streamToObject, streamToString } from '../../utils/util.js'
@@ -75,6 +83,7 @@ computeRoutes.get(`${SERVICES_API_BASE_PATH}/computeEnvironments`, async (req, r
   }
 })

+// start compute
 computeRoutes.post(`${SERVICES_API_BASE_PATH}/compute`, async (req, res) => {
   try {
     HTTP_LOGGER.logMessage(
@@ -90,11 +99,7 @@ computeRoutes.post(`${SERVICES_API_BASE_PATH}/compute`, async (req, res) => {
       nonce: (req.body.nonce as string) || null,
       environment: (req.body.environment as string) || null,
       algorithm: (req.body.algorithm as ComputeAlgorithm) || null,
-      dataset: (req.body.dataset as unknown as ComputeAsset) || null
-    }
-    if (req.body.additionalDatasets) {
-      startComputeTask.additionalDatasets = req.query
-        .additionalDatasets as unknown as ComputeAsset[]
+      datasets: (req.body.datasets as unknown as ComputeAsset[]) || null
     }
     if (req.body.output) {
       startComputeTask.output = req.body.output as ComputeOutput
@@ -114,6 +119,46 @@ computeRoutes.post(`${SERVICES_API_BASE_PATH}/compute`, async (req, res) => {
   }
 })

+// free compute
+computeRoutes.post(`${SERVICES_API_BASE_PATH}/freeCompute`, async (req, res) => {
+  try {
+    HTTP_LOGGER.logMessage(
+      `FreeComputeStartCommand request received as body params: ${JSON.stringify(
+        req.body
+      )}`,
+      true
+    )
+
+    const startComputeTask: FreeComputeStartCommand = {
+      command: PROTOCOL_COMMANDS.FREE_COMPUTE_START,
+      node: (req.body.node as string) || null,
+      consumerAddress: (req.body.consumerAddress as string) || null,
+      signature: (req.body.signature as string) || null,
+      nonce: (req.body.nonce as string) || null,
+      algorithm: (req.body.algorithm as ComputeAlgorithm) || null,
+      datasets: (req.body.datasets as unknown as ComputeAsset[]) || null
+    }
+    if (req.body.output) {
+      startComputeTask.output = req.body.output as ComputeOutput
+    }
+
+    const response = await new FreeComputeStartHandler(req.oceanNode).handle(
+      startComputeTask
+    )
+    if (response?.status?.httpStatus === 200) {
+      const jobs = await streamToObject(response.stream as Readable)
+      res.status(200).json(jobs)
+    } else {
+      HTTP_LOGGER.log(LOG_LEVELS_STR.LEVEL_INFO, `Error: ${response?.status?.error}`)
+      res.status(response?.status.httpStatus).json(response?.status?.error)
+    }
+  } catch (error) {
+    HTTP_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, `Error: ${error}`)
+    res.status(500).send('Internal Server Error')
+  }
+})
+
+// stop compute
 computeRoutes.put(`${SERVICES_API_BASE_PATH}/compute`, async (req, res) => {
   try {
     HTTP_LOGGER.logMessage(
@@ -141,6 +186,7 @@ computeRoutes.put(`${SERVICES_API_BASE_PATH}/compute`, async (req, res) => {
   }
 })

+// get status
 computeRoutes.get(`${SERVICES_API_BASE_PATH}/compute`, async (req, res) => {
   try {
     HTTP_LOGGER.logMessage(
@@ -165,6 +211,7 @@ computeRoutes.get(`${SERVICES_API_BASE_PATH}/compute`, async (req, res) => {
   }
 })

+// compute results
 computeRoutes.get(`${SERVICES_API_BASE_PATH}/computeResult`, async (req, res) => {
   try {
     HTTP_LOGGER.logMessage(
@@ -196,6 +243,41 @@ computeRoutes.get(`${SERVICES_API_BASE_PATH}/computeResult`, async (req, res) =>
     res.status(500).send('Internal Server Error')
   }
 })
+
+// streaming logs
+computeRoutes.get(`${SERVICES_API_BASE_PATH}/computeStreamableLogs`, async (req, res) => {
+  try {
+    HTTP_LOGGER.logMessage(
+      `ComputeGetStreamableLogsCommand request received with query: ${JSON.stringify(
+        req.query
+      )}`,
+      true
+    )
+    const resultComputeTask: ComputeGetStreamableLogsCommand = {
+      command: PROTOCOL_COMMANDS.COMPUTE_GET_STREAMABLE_LOGS,
+      node: (req.query.node as string) || null,
+      consumerAddress: (req.query.consumerAddress as string) || null,
+      jobId: (req.query.jobId as string) || null,
+      signature: (req.query.signature as string) || null,
+      nonce: (req.query.nonce as string) || null
+    }
+
+    const response = await new ComputeGetStreamableLogsHandler(req.oceanNode).handle(
+      resultComputeTask
+    )
+    if (response.stream) {
+      res.status(response.status.httpStatus)
+      res.set(response.status.headers)
+      response.stream.pipe(res)
+    } else {
+      res.status(response.status.httpStatus).send(response.status.error)
+    }
+  } catch (error) {
+    HTTP_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, `Error: ${error}`)
+    res.status(500).send('Internal Server Error')
+  }
+})
+
 computeRoutes.post(`${SERVICES_API_BASE_PATH}/initializeCompute`, async (req, res) => {
   try {
     HTTP_LOGGER.logMessage(
diff --git a/src/components/httpRoutes/routeUtils.ts b/src/components/httpRoutes/routeUtils.ts
index 4843f5d0e..401fee7e8 100644
--- a/src/components/httpRoutes/routeUtils.ts
+++ b/src/components/httpRoutes/routeUtils.ts
@@ -46,6 +46,16 @@ routesNames.set('computeStart', {
   method: 'post'
 })

+routesNames.set('freeCompute', {
+  path: `${SERVICES_API_BASE_PATH}/freeCompute`,
+  method: 'post'
+})
+
+routesNames.set('computeStreamableLogs', {
+  path: `${SERVICES_API_BASE_PATH}/computeStreamableLogs`,
+  method: 'get'
+})
+
 routesNames.set('computeStatus', {
   path: `${SERVICES_API_BASE_PATH}/compute`,
   method: 'get'
diff --git a/src/components/storage/index.ts b/src/components/storage/index.ts
index 90ddfd866..19b0add13 100644
--- a/src/components/storage/index.ts
+++ b/src/components/storage/index.ts
@@ -77,6 +77,11 @@ export abstract class Storage {
     }
   }

+  getStorageType(file: any): FileObjectType {
+    const { type } = file
+    return type
+  }
+
   async
getFileInfo( fileInfoRequest: FileInfoRequest, forceChecksum: boolean = false @@ -184,7 +189,7 @@ export class UrlStorage extends Storage { super(file, config) const [isValid, message] = this.validate() if (isValid === false) { - throw new Error(`Error validationg the URL file: ${message}`) + throw new Error(`Error validating the URL file: ${message}`) } } @@ -271,7 +276,7 @@ export class ArweaveStorage extends Storage { const [isValid, message] = this.validate() if (isValid === false) { - throw new Error(`Error validationg the Arweave file: ${message}`) + throw new Error(`Error validating the Arweave file: ${message}`) } } @@ -349,7 +354,7 @@ export class IpfsStorage extends Storage { const [isValid, message] = this.validate() if (isValid === false) { - throw new Error(`Error validationg the IPFS file: ${message}`) + throw new Error(`Error validating the IPFS file: ${message}`) } } diff --git a/src/index.ts b/src/index.ts index 1796a87b9..89c5b1e7c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -17,7 +17,7 @@ import { OCEAN_NODE_LOGGER } from './utils/logging/common.js' import path from 'path' import { fileURLToPath } from 'url' import cors from 'cors' -import { scheduleCronJobs } from './utils/logging/logDeleteCron.js' +import { scheduleCronJobs } from './utils/cronjobs/scheduleCronJobs.js' import { requestValidator } from './components/httpRoutes/requestValidator.js' import { hasValidDBConfiguration } from './utils/database.js' diff --git a/src/test/data/assets.ts b/src/test/data/assets.ts index 476b78850..632394f5f 100644 --- a/src/test/data/assets.ts +++ b/src/test/data/assets.ts @@ -252,6 +252,7 @@ export const algoAsset = { files: { files: [ { + type: 'url', url: 'https://raw.githubusercontent.com/oceanprotocol/test-algorithm/master/javascript/algo.js', contentType: 'text/js', encoding: 'UTF-8' @@ -285,3 +286,99 @@ export const algoAsset = { created: '' } } + +export const completeDBComputeJob = { + owner: '0x6c957a45C801035d3297d43d0Ce83a237Ec5E0d1', + did: '', + jobId: '34aa4e7e-ce41-4547-b3e1-57aa1a7f97e6', + dateCreated: '1732720690.68', + dateFinished: '', + status: 70, + statusText: 'Job finished', + results: '', + inputDID: '', + algoDID: '', + agreementId: '0x56e2a0a9a6abcadac403dddc59858a5caf51ac286b401c811655b0235cd45da6', + expireTimestamp: 1732721290.68, + environment: '0x46f61c90309fcffa02e887e1a8a1ebdfeabe4f1ff279e306de2803df36bd46f7-free', + clusterHash: '0x3e072d2ac72e9ad87fed5a913caea960c89dfad85d447cbbc92c32457f0413e1', + configlogURL: '', + publishlogURL: '', + algologURL: '', + outputsURL: '', + stopRequested: false, + algorithm: { + documentId: 'did:op:39d9c2a7536865f9516b9f84432a624e25c8bb3e482de113ac9919af7d7a4866', + serviceId: 'db164c1b981e4d2974e90e61bda121512e6909c1035c908d68933ae4cfaba6b0', + meta: { language: '', version: '0.1', container: [Object] }, + transferTxId: '0x5c946d52cdd1623061330f455d4cb6d5898770987baa6539bda851d6c537cf6e' + }, + assets: [ + { + documentId: + 'did:op:ae13ce05f05457c041b013f41bf51400863eb5f387ba34e1b076f1f832a68071', + serviceId: 'ccb398c50d6abd5b456e8d7242bd856a1767a890b537c2f8c10ba8b8a10e6025', + transferTxId: '0xf14e89d0f0a80bf55392430e7479cac5eca6ed453e7b3ead99ab3c9820c9a411' + } + ], + isRunning: false, + isStarted: false, + containerImage: + 'node@sha256:1155995dda741e93afe4b1c6ced2d01734a6ec69865cc0997daf1f4db7259a36' +} + +export const dockerImageManifest = { + schemaVersion: 2, + mediaType: 'application/vnd.docker.distribution.manifest.v2+json', + config: { + mediaType: 
'application/vnd.docker.container.image.v1+json', + size: 7286, + digest: 'sha256:386e0be86bde5eff9f85ea9eda02727dd4641664d746688b4049f79ef0cdb1c9' + }, + platform: { + architecture: 'amd64', + os: 'linux' + }, + layers: [ + { + mediaType: 'application/vnd.docker.image.rootfs.diff.tar.gzip', + size: 49557601, + digest: 'sha256:167b8a53ca4504bc6aa3182e336fa96f4ef76875d158c1933d3e2fa19c57e0c3' + }, + { + mediaType: 'application/vnd.docker.image.rootfs.diff.tar.gzip', + size: 24030522, + digest: 'sha256:b47a222d28fa95680198398973d0a29b82a968f03e7ef361cc8ded562e4d84a3' + }, + { + mediaType: 'application/vnd.docker.image.rootfs.diff.tar.gzip', + size: 64112257, + digest: 'sha256:debce5f9f3a9709885f7f2ad3cf41f036a3b57b406b27ba3a883928315787042' + }, + { + mediaType: 'application/vnd.docker.image.rootfs.diff.tar.gzip', + size: 211039785, + digest: 'sha256:1d7ca7cd2e066ae77ac6284a9d027f72a31a02a18bfc2a249ef2e7b01074338b' + }, + { + mediaType: 'application/vnd.docker.image.rootfs.diff.tar.gzip', + size: 3371, + digest: 'sha256:94c7791033e87c3ab82bf56f778253138bbd5caf172ead6fc0ce39d459560607' + }, + { + mediaType: 'application/vnd.docker.image.rootfs.diff.tar.gzip', + size: 47856943, + digest: 'sha256:72ab0dfaf5cb14ab09fd3478f8a01e3c3e21b7ad06e7b04ccac2f304d455ff45' + }, + { + mediaType: 'application/vnd.docker.image.rootfs.diff.tar.gzip', + size: 2280920, + digest: 'sha256:3316ed2852d408595e2dfc601d96f39f4a39747bd1eb2eb1b63b1f3d49c42919' + }, + { + mediaType: 'application/vnd.docker.image.rootfs.diff.tar.gzip', + size: 451, + digest: 'sha256:ef5505406bea98d0f6adb559b937c0dad0aef6d98500b1120c6e27c50fdf172b' + } + ] +} diff --git a/src/test/data/commands.ts b/src/test/data/commands.ts new file mode 100644 index 000000000..3b75a2225 --- /dev/null +++ b/src/test/data/commands.ts @@ -0,0 +1,30 @@ +export const freeComputeStartPayload = { + command: 'freeStartCompute', + consumerAddress: '0xC7EC1970B09224B317c52d92f37F5e1E4fF6B687', + nonce: '1', + signature: '0x123', + datasets: [ + { + fileObject: { + type: 'url', + url: 'https://raw.githubusercontent.com/oceanprotocol/ocean-cli/refs/heads/main/metadata/simpleComputeDataset.json', + method: 'GET' + } + } + ], + algorithm: { + fileObject: { + type: 'url', + url: 'https://raw.githubusercontent.com/oceanprotocol/ocean-cli/refs/heads/main/metadata/pythonAlgo.json', + method: 'GET' + }, + meta: { + container: { + image: 'my-compute-test', + tag: 'latest', + entrypoint: 'python $ALGO', + checksum: 'my-compute-checksum' + } + } + } +} diff --git a/src/test/integration/compute.test.ts b/src/test/integration/compute.test.ts index 395fd9900..9a8b1943d 100644 --- a/src/test/integration/compute.test.ts +++ b/src/test/integration/compute.test.ts @@ -4,19 +4,21 @@ import { ComputeStartHandler, ComputeStopHandler, ComputeGetStatusHandler, - ComputeInitializeHandler + ComputeInitializeHandler, + FreeComputeStartHandler } from '../../components/core/compute/index.js' import type { ComputeStartCommand, ComputeStopCommand, ComputeGetStatusCommand, - ComputeInitializeCommand + ComputeInitializeCommand, + FreeComputeStartCommand } from '../../@types/commands.js' import type { ComputeAsset, ComputeAlgorithm, ComputeEnvironment -} from '../../@types/C2D.js' +} from '../../@types/C2D/C2D.js' import { // DB_TYPES, ENVIRONMENT_VARIABLES, @@ -68,6 +70,8 @@ import { validateAlgoForDataset } from '../../components/core/compute/utils.js' +import { freeComputeStartPayload } from '../data/commands.js' + describe('Compute', () => { let previousConfiguration: OverrideEnvConfig[] let 
config: OceanNodeConfig @@ -114,8 +118,8 @@ describe('Compute', () => { ENVIRONMENT_VARIABLES.PRIVATE_KEY, ENVIRONMENT_VARIABLES.AUTHORIZED_DECRYPTERS, ENVIRONMENT_VARIABLES.ADDRESS_FILE, - ENVIRONMENT_VARIABLES.OPERATOR_SERVICE_URL - // ENVIRONMENT_VARIABLES.DB_URL, + ENVIRONMENT_VARIABLES.OPERATOR_SERVICE_URL, + ENVIRONMENT_VARIABLES.DOCKER_SOCKET_PATH // ENVIRONMENT_VARIABLES.DB_TYPE ], [ @@ -124,8 +128,8 @@ describe('Compute', () => { '0xc594c6e5def4bab63ac29eed19a134c130388f74f019bc74b8f4389df2837a58', JSON.stringify(['0xe2DD09d719Da89e5a3D0F2549c7E24566e947260']), `${homedir}/.ocean/ocean-contracts/artifacts/address.json`, - JSON.stringify(['http://localhost:31000']) - // 'http://localhost:9200', + JSON.stringify(['http://localhost:31000']), + '/var/run/docker.sock' // DB_TYPES.ELASTIC_SEARCH ] ) @@ -253,15 +257,18 @@ describe('Compute', () => { expect(response.stream).to.be.instanceOf(Readable) computeEnvironments = await streamToObject(response.stream as Readable) - // expect 2 envs - expect(computeEnvironments[DEVELOPMENT_CHAIN_ID].length === 2, 'incorrect length') + // expect 3 envs + expect(computeEnvironments[DEVELOPMENT_CHAIN_ID].length === 3, 'incorrect length') for (const computeEnvironment of computeEnvironments[DEVELOPMENT_CHAIN_ID]) { assert(computeEnvironment.id, 'id missing in computeEnvironments') assert( computeEnvironment.consumerAddress, 'consumerAddress missing in computeEnvironments' ) - assert(computeEnvironment.lastSeen, 'lastSeen missing in computeEnvironments') + // what is this? not present on free envs, so skip.. in any case the field is optional + if (!computeEnvironment.free) { + assert(computeEnvironment.lastSeen, 'lastSeen missing in computeEnvironments') + } assert(computeEnvironment.id.startsWith('0x'), 'id should start with 0x') assert(computeEnvironment.cpuNumber > 0, 'cpuNumber missing in computeEnvironments') assert(computeEnvironment.ramGB > 0, 'ramGB missing in computeEnvironments') @@ -547,11 +554,13 @@ describe('Compute', () => { signature, nonce, environment: firstEnv.id, - dataset: { - documentId: publishedComputeDataset.ddo.id, - serviceId: publishedComputeDataset.ddo.services[0].id, - transferTxId: '0x123' - }, + datasets: [ + { + documentId: publishedComputeDataset.ddo.id, + serviceId: publishedComputeDataset.ddo.services[0].id, + transferTxId: '0x123' + } + ], algorithm: { documentId: publishedAlgoDataset.ddo.id, serviceId: publishedAlgoDataset.ddo.services[0].id, @@ -562,6 +571,8 @@ describe('Compute', () => { // output?: ComputeOutput } const response = await new ComputeStartHandler(oceanNode).handle(startComputeTask) + console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') + console.log(response) assert(response, 'Failed to get response') // should fail, because txId '0x123' is not a valid order assert(response.status.httpStatus === 500, 'Failed to get 500 response') @@ -584,11 +595,13 @@ describe('Compute', () => { signature, nonce, environment: firstEnv.id, - dataset: { - documentId: publishedComputeDataset.ddo.id, - serviceId: publishedComputeDataset.ddo.services[0].id, - transferTxId: datasetOrderTxId - }, + datasets: [ + { + documentId: publishedComputeDataset.ddo.id, + serviceId: publishedComputeDataset.ddo.services[0].id, + transferTxId: datasetOrderTxId + } + ], algorithm: { documentId: publishedAlgoDataset.ddo.id, serviceId: publishedAlgoDataset.ddo.services[0].id, @@ -610,6 +623,52 @@ describe('Compute', () => { jobId = jobs[0].jobId }) + it('should start a free docker compute job', async () => { + const nonce = Date.now().toString() 
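+      // Note: the scheme below hashes the nonce (solidityPackedKeccak256 over its
+      // UTF-8 bytes) and signs the 32 digest bytes, so the node can presumably
+      // recover the consumer address with ethers.verifyMessage(digestBytes, signature).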
+ const message = String(nonce) + // sign message/nonce + const consumerMessage = ethers.solidityPackedKeccak256( + ['bytes'], + [ethers.hexlify(ethers.toUtf8Bytes(message))] + ) + const messageHashBytes = ethers.toBeArray(consumerMessage) + const signature = await wallet.signMessage(messageHashBytes) + const startComputeTask: ComputeStartCommand = { + command: PROTOCOL_COMMANDS.FREE_COMPUTE_START, + consumerAddress: await wallet.getAddress(), + signature, + nonce, + environment: firstEnv.id, + datasets: [ + { + fileObject: computeAsset.services[0].files.files[0], + documentId: publishedComputeDataset.ddo.id, + serviceId: publishedComputeDataset.ddo.services[0].id, + transferTxId: datasetOrderTxId + } + ], + algorithm: { + fileObject: algoAsset.services[0].files.files[0], + documentId: publishedAlgoDataset.ddo.id, + serviceId: publishedAlgoDataset.ddo.services[0].id, + transferTxId: algoOrderTxId, + meta: publishedAlgoDataset.ddo.metadata.algorithm + }, + output: {} + // additionalDatasets?: ComputeAsset[] + // output?: ComputeOutput + } + const response = await new FreeComputeStartHandler(oceanNode).handle(startComputeTask) + assert(response, 'Failed to get response') + assert(response.status.httpStatus === 200, 'Failed to get 200 response') + assert(response.stream, 'Failed to get stream') + expect(response.stream).to.be.instanceOf(Readable) + + const jobs = await streamToObject(response.stream as Readable) + // eslint-disable-next-line prefer-destructuring + assert(jobs[0].jobId, 'failed to get job id') + }) + it('should stop a compute job', async () => { const nonce = Date.now().toString() const message = String(nonce) @@ -670,6 +729,20 @@ describe('Compute', () => { console.log(jobs) }) + it('should deny the Free job due to bad container image (directCommand payload)', async function () { + const command: FreeComputeStartCommand = freeComputeStartPayload + const handler = new FreeComputeStartHandler(oceanNode) + const response = await handler.handle(command) + assert(response.status.httpStatus === 500, 'Failed to get 500 response') + assert(response.stream === null, 'Should not get stream') + assert( + response.status.error.includes( + freeComputeStartPayload.algorithm.meta.container.image + ), + 'Should have image error' + ) + }) + it('should checkC2DEnvExists', async () => { const envId = '0x123' const result = await checkC2DEnvExists(envId, oceanNode) diff --git a/src/test/unit/commands.test.ts b/src/test/unit/commands.test.ts index e00874075..cbe355dce 100644 --- a/src/test/unit/commands.test.ts +++ b/src/test/unit/commands.test.ts @@ -289,7 +289,7 @@ describe('Commands and handlers', () => { nonce: '', environment: '', algorithm: undefined, - dataset: undefined + datasets: undefined } expect(startEnvHandler.validate(startEnvCommand).valid).to.be.equal(false) // ----------------------------------------- diff --git a/src/test/unit/compute.test.ts b/src/test/unit/compute.test.ts new file mode 100644 index 000000000..e4b7ba00a --- /dev/null +++ b/src/test/unit/compute.test.ts @@ -0,0 +1,237 @@ +import { C2DDatabase } from '../../components/database/C2DDatabase.js' +import { existsEnvironmentVariable, getConfiguration } from '../../utils/config.js' +import { typesenseSchemas } from '../../components/database/TypesenseSchemas.js' +import { + C2DStatusNumber, + C2DStatusText, + ComputeAlgorithm, + ComputeAsset, + ComputeEnvironment, + ComputeJob, + DBComputeJob, + RunningPlatform +} from '../../@types/C2D/C2D.js' +// import { computeAsset } from '../data/assets' +import { assert, expect
} from 'chai' +import { + convertArrayToString, + convertStringToArray, + STRING_SEPARATOR +} from '../../components/database/sqliteCompute.js' +import { + buildEnvOverrideConfig, + OverrideEnvConfig, + setupEnvironment, + tearDownEnvironment +} from '../utils/utils.js' +import { OceanNodeConfig } from '../../@types/OceanNode.js' +import { ENVIRONMENT_VARIABLES } from '../../utils/constants.js' +import { completeDBComputeJob, dockerImageManifest } from '../data/assets.js' +import { omitDBComputeFieldsFromComputeJob } from '../../components/c2d/index.js' +import os from 'os' +import { checkManifestPlatform } from '../../components/c2d/compute_engine_docker.js' + +describe('Compute Jobs Database', () => { + let envOverrides: OverrideEnvConfig[] + let config: OceanNodeConfig + let db: C2DDatabase = null + let jobId: string = null + + const algorithm: ComputeAlgorithm = { + documentId: 'did:op:12345', + serviceId: '0x1828228' + } + const dataset: ComputeAsset = { + documentId: 'did:op:12345', + serviceId: '0x12345abc' + } + before(async () => { + envOverrides = buildEnvOverrideConfig( + [ENVIRONMENT_VARIABLES.DOCKER_SOCKET_PATH], + ['/var/lib/docker'] + ) + envOverrides = await setupEnvironment(null, envOverrides) + config = await getConfiguration(true) + db = await new C2DDatabase(config.dbConfig, typesenseSchemas.c2dSchemas) + }) + + it('should have at least a free docker compute environment', () => { + let size = 1 + if (existsEnvironmentVariable(ENVIRONMENT_VARIABLES.OPERATOR_SERVICE_URL, false)) { + expect(config.c2dClusters.length).to.be.at.least(2) + size = 2 + } else { + expect(config.c2dClusters.length).to.be.at.least(1) + } + const dockerConfig = config.c2dClusters[size - 1].connection + const freeEnv: ComputeEnvironment = dockerConfig.freeComputeOptions + expect(freeEnv.desc).to.be.equal('Free') + expect(freeEnv.free).to.be.equal(true) + expect(freeEnv.id).to.be.equal(config.c2dClusters[size - 1].hash + '-free') + }) + + it('should create a new C2D Job', async () => { + const job: DBComputeJob = { + owner: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260', + jobId: null, + dateCreated: null, + dateFinished: null, + status: C2DStatusNumber.JobStarted, + statusText: C2DStatusText.JobStarted, + results: null, + inputDID: ['did:op:1', 'did:op:2', 'did:op:3'], + expireTimestamp: 0, + + // internal structure + clusterHash: 'clusterHash', + configlogURL: 'http://localhost:8001', + publishlogURL: 'http://localhost:8001', + algologURL: 'http://localhost:8001', + outputsURL: 'http://localhost:8001', + stopRequested: false, + algorithm, + assets: [dataset], + isRunning: false, + isStarted: false, + containerImage: 'some container image' + } + + jobId = await db.newJob(job) + assert(jobId, 'Missing jobId identifier') + }) + + it('should get job by jobId', async () => { + const jobs = await db.getJob(jobId) + assert(jobs.length === 1, 'Could not get any job') + assert(jobs[0], 'Job should not be null') + assert(jobs[0].jobId === jobId, 'JobId mismatches') + }) + + it('should update job', async () => { + const jobs = await db.getJob(jobId) + const job = jobs[0] + // will update some fields + job.status = C2DStatusNumber.PullImage + job.isRunning = true + job.statusText = C2DStatusText.PullImage + + // update on DB + const updates = await db.updateJob(job) + expect(updates).to.be.equal(1) // updated 1 row + const updatedJobs = await db.getJob(jobId) + const updatedJob = updatedJobs[0] + assert(updatedJob, 'Job should not be null') + expect(updatedJob.status).to.be.equal(C2DStatusNumber.PullImage) + 
expect(updatedJob.isRunning).to.be.equal(true) + expect(updatedJob.statusText).to.be.equal(C2DStatusText.PullImage) + }) + + it('should get running jobs', async () => { + const job: DBComputeJob = { + owner: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947261', + jobId: null, + dateCreated: null, + dateFinished: null, + status: C2DStatusNumber.JobStarted, + statusText: C2DStatusText.JobStarted, + results: null, + inputDID: ['did:op:1', 'did:op:2'], + expireTimestamp: 1, + + // internal structure + clusterHash: 'clusterHash', + configlogURL: 'http://localhost:8000', + publishlogURL: 'http://localhost:8000', + algologURL: 'http://localhost:8000', + outputsURL: 'http://localhost:8000', + stopRequested: false, + algorithm, + assets: [dataset], + isRunning: false, + isStarted: false, + containerImage: 'another container image' + } + + const jobId = await db.newJob(job) + assert(jobId, 'Missing jobId identifier') + const existing = await db.getRunningJobs() + assert(existing.length === 2, 'No running jobs were found!') + + // Create a filter + const withEnv = await db.getRunningJobs(null, 'some environment') + assert(withEnv.length === 0, 'Should not find running jobs for this environment') + // delete it + const deleted = await db.deleteJob(jobId) + assert(deleted === true, `Job ${jobId} was not deleted!`) + }) + + it('should delete the job by jobId', async () => { + const deleted = await db.deleteJob(jobId) + assert(deleted === true, `Job ${jobId} was not deleted!`) + }) + + it('should convert array of strings to a string', () => { + const inputDID = ['did:op:1', 'did:op:2', 'did:op:3'] + const expectedStr = + 'did:op:1' + STRING_SEPARATOR + 'did:op:2' + STRING_SEPARATOR + 'did:op:3' + expect(convertArrayToString(inputDID)).to.equal(expectedStr) + }) + + it('should convert concatenated string to a string array', () => { + const expectedArray = ['did:op:1', 'did:op:2', 'did:op:3'] + const str = 'did:op:1' + STRING_SEPARATOR + 'did:op:2' + STRING_SEPARATOR + 'did:op:3' + expect(convertStringToArray(str)).to.deep.equal(expectedArray) + }) + + it('should convert DBComputeJob to ComputeJob and omit internal DB data', () => { + const source: any = completeDBComputeJob + const output: ComputeJob = omitDBComputeFieldsFromComputeJob(source as DBComputeJob) + + expect(Object.prototype.hasOwnProperty.call(output, 'clusterHash')).to.be.equal(false) + expect(Object.prototype.hasOwnProperty.call(output, 'configlogURL')).to.be.equal( + false + ) + expect(Object.prototype.hasOwnProperty.call(output, 'publishlogURL')).to.be.equal( + false + ) + expect(Object.prototype.hasOwnProperty.call(output, 'algologURL')).to.be.equal(false) + expect(Object.prototype.hasOwnProperty.call(output, 'outputsURL')).to.be.equal(false) + expect(Object.prototype.hasOwnProperty.call(output, 'stopRequested')).to.be.equal( + false + ) + expect(Object.prototype.hasOwnProperty.call(output, 'algorithm')).to.be.equal(false) + expect(Object.prototype.hasOwnProperty.call(output, 'assets')).to.be.equal(false) + expect(Object.prototype.hasOwnProperty.call(output, 'isRunning')).to.be.equal(false) + expect(Object.prototype.hasOwnProperty.call(output, 'isStarted')).to.be.equal(false) + expect(Object.prototype.hasOwnProperty.call(output, 'containerImage')).to.be.equal( + false + ) + }) + + it('should check manifest platform against local platform env', () => { + const arch = os.machine() // ex: arm + const platform = os.platform() // ex: linux + const env: RunningPlatform = { + architecture: arch, + os: platform + } + const result: boolean =
checkManifestPlatform(dockerImageManifest.platform, env) + // if everything is defined and matches, it is OK + if ( + dockerImageManifest.platform.os === env.os && + dockerImageManifest.platform.architecture === env.architecture + ) { + expect(result).to.be.equal(true) + } else { + // otherwise it is NOT + expect(result).to.be.equal(false) + } + + // all good anyway, nothing on the manifest + expect(checkManifestPlatform(null, env)).to.be.equal(true) + }) + + after(async () => { + await tearDownEnvironment(envOverrides) + }) +}) diff --git a/src/test/unit/storage.test.ts b/src/test/unit/storage.test.ts index 4ef9fa174..5ea056e39 100644 --- a/src/test/unit/storage.test.ts +++ b/src/test/unit/storage.test.ts @@ -88,7 +88,7 @@ describe('URL Storage tests', () => { error = err } expect(error.message).to.eql( - 'Error validationg the URL file: URL or method are missing' + 'Error validating the URL file: URL or method are missing' ) file = { type: 'url', @@ -106,7 +106,7 @@ describe('URL Storage tests', () => { error = err } expect(error.message).to.eql( - 'Error validationg the URL file: URL or method are missing' + 'Error validating the URL file: URL or method are missing' ) }) it('URL validation fails on invalid method', () => { @@ -126,7 +126,7 @@ describe('URL Storage tests', () => { } catch (err) { error = err } - expect(error.message).to.eql('Error validationg the URL file: Invalid method for URL') + expect(error.message).to.eql('Error validating the URL file: Invalid method for URL') }) it('URL validation fails on filename', () => { @@ -147,7 +147,7 @@ describe('URL Storage tests', () => { error = err } expect(error.message).to.eql( - 'Error validationg the URL file: URL looks like a file path' + 'Error validating the URL file: URL looks like a file path' ) }) it('Gets download URL', () => { @@ -205,9 +205,7 @@ describe('Unsafe URL tests', () => { } catch (err) { error = err } - expect(error.message).to.eql( - 'Error validationg the URL file: URL is marked as unsafe' - ) + expect(error.message).to.eql('Error validating the URL file: URL is marked as unsafe') }) it('Should allow safe URL', () => { file = { @@ -254,7 +252,7 @@ describe('IPFS Storage tests', () => { } catch (err) { error = err } - expect(error.message).to.eql('Error validationg the IPFS file: Missing CID') + expect(error.message).to.eql('Error validating the IPFS file: Missing CID') }) after(() => { @@ -296,7 +294,7 @@ describe('Arweave Storage tests', () => { error = err } expect(error.message).to.eql( - 'Error validationg the Arweave file: Missing transaction ID' + 'Error validating the Arweave file: Missing transaction ID' ) }) @@ -376,7 +374,7 @@ describe('URL Storage with malformed URL', () => { error = err } expect(error.message).to.equal( - 'Error validationg the URL file: URL looks like a file path' + 'Error validating the URL file: URL looks like a file path' ) }) }) @@ -442,7 +440,7 @@ describe('Arweave Storage with malformed transaction ID', () => { error = err } expect(error.message).to.equal( - 'Error validationg the Arweave file: Transaction ID looks like an URL. Please specify URL storage instead.' + 'Error validating the Arweave file: Transaction ID looks like an URL. Please specify URL storage instead.'
) }) @@ -460,7 +458,7 @@ describe('Arweave Storage with malformed transaction ID', () => { error = err } expect(error.message).to.equal( - 'Error validationg the Arweave file: Transaction ID looks like a file path' + 'Error validating the Arweave file: Transaction ID looks like a file path' ) }) }) @@ -485,7 +483,7 @@ describe('Arweave Storage with malformed transaction ID', () => { error = err } expect(error.message).to.equal( - 'Error validationg the IPFS file: CID looks like an URL. Please specify URL storage instead.' + 'Error validating the IPFS file: CID looks like an URL. Please specify URL storage instead.' ) }) @@ -503,7 +501,7 @@ describe('Arweave Storage with malformed transaction ID', () => { error = err } expect(error.message).to.equal( - 'Error validationg the IPFS file: CID looks like a file path' + 'Error validating the IPFS file: CID looks like a file path' ) }) }) diff --git a/src/utils/asset.ts b/src/utils/asset.ts index 44eeddb51..b5605e8e6 100644 --- a/src/utils/asset.ts +++ b/src/utils/asset.ts @@ -192,7 +192,10 @@ export async function getFilesObjectFromConfidentialEVM( signer: Signer, consumerAddress: string, consumerSignature: string, - consumerData: string // ddo id + nonce + consumerData: string + // NOTE about the signed consumer data: + // ddo id + nonce (for downloading) + // consumerAddress + datasets[0].documentId + nonce (for start/init compute) ): Promise { try { const currentProviderAddress = await signer.getAddress()
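For reference, a minimal sketch of how a consumer could build the signature matching the start/init compute layout described in the note above. This assumes ethers v6 and mirrors the keccak256-over-UTF-8-bytes signing pattern used in the integration tests; `signComputeConsumeData`, `wallet`, `documentId` and `nonce` are illustrative placeholders, not a confirmed provider API:

import { ethers } from 'ethers'

// Hypothetical helper: signs consumerAddress + datasets[0].documentId + nonce,
// the consume-data layout described for start/init compute requests
async function signComputeConsumeData(
  wallet: ethers.Wallet,
  documentId: string,
  nonce: string
): Promise<string> {
  const message = (await wallet.getAddress()) + documentId + nonce
  // hash the UTF-8 bytes of the message, then sign the digest bytes;
  // signMessage applies the EIP-191 personal-message prefix before signing
  const digest = ethers.solidityPackedKeccak256(
    ['bytes'],
    [ethers.hexlify(ethers.toUtf8Bytes(message))]
  )
  return await wallet.signMessage(ethers.toBeArray(digest))
}

diff --git a/src/utils/blockchain.ts b/src/utils/blockchain.ts index bfafb5665..0dc7d1356 100644 --- a/src/utils/blockchain.ts +++ b/src/utils/blockchain.ts @@ -58,6 +58,10 @@ return this.chainId } + public async getWalletAddress(): Promise<string> { + return await this.signer.getAddress() + } + public async isNetworkReady(): Promise { if (this.networkAvailable && this.provider.ready) { return { ready: true } diff --git a/src/utils/config.ts b/src/utils/config.ts index 17d4fc020..c63954e5b 100644 --- a/src/utils/config.ts +++ b/src/utils/config.ts @@ -1,11 +1,11 @@ +import type { DenyList, OceanNodeConfig, OceanNodeKeys } from '../@types/OceanNode' import type { - DenyList, - OceanNodeConfig, - OceanNodeKeys, - OceanNodeDockerConfig -} from '../@types/OceanNode' -import type { C2DClusterInfo } from '../@types/C2D.js' -import { C2DClusterType } from '../@types/C2D.js' + C2DClusterInfo, + ComputeEnvironment, + C2DDockerConfig, + ComputeEnvironmentBaseConfig +} from '../@types/C2D/C2D.js' +import { C2DClusterType } from '../@types/C2D/C2D.js' import { createFromPrivKey } from '@libp2p/peer-id-factory' import { keys } from '@libp2p/crypto' import { @@ -18,7 +18,7 @@ import { defaultBootstrapAddresses, knownUnsafeURLs } from '../utils/constants.j import { LOG_LEVELS_STR, GENERIC_EMOJIS, getLoggerLevelEmoji } from './logging/Logger.js' import { RPCS } from '../@types/blockchain' -import { getAddress, Wallet } from 'ethers' +import { getAddress, Wallet, ZeroAddress } from 'ethers' import { FeeAmount, FeeStrategy, FeeTokens } from '../@types/Fees' import { getOceanArtifactsAdresses, @@ -26,6 +26,8 @@ } from '../utils/address.js' import { CONFIG_LOGGER } from './logging/common.js' import { create256Hash } from './crypt.js' +import { isDefined } from './util.js' +import os from 'os' // usefull for lazy loading and avoid boilerplate on other places let previousConfiguration: OceanNodeConfig = null @@ -53,7 +55,10 @@ export async function getPeerIdFromPrivateKey( function getEnvValue(env: any, defaultValue: any) { /* Gets value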
for an ENV var, returning defaultValue if not defined */ - return env != null ? (env as string) : defaultValue + if (env === null || env === undefined || (env as string).length === 0) { + return defaultValue + } + return env as string } function getIntEnvValue(env: any, defaultValue: number) { @@ -320,18 +325,6 @@ function getOceanNodeFees(supportedNetworks: RPCS, isStartup?: boolean): FeeStra } } -function getC2DDockerConfig(isStartup?: boolean): OceanNodeDockerConfig { - const config = { - socketPath: getEnvValue(process.env.DOCKER_SOCKET_PATH, null), - protocol: getEnvValue(process.env.DOCKER_PROTOCOL, null), - host: getEnvValue(process.env.DOCKER_HOST, null), - port: getIntEnvValue(process.env.DOCKER_PORT, 0), - caPath: getEnvValue(process.env.DOCKER_CA_PATH, null), - certPath: getEnvValue(process.env.DOCKER_CERT_PATH, null), - keyPath: getEnvValue(process.env.DOCKER_KEY_PATH, null) - } - return config -} // get C2D environments function getC2DClusterEnvironment(isStartup?: boolean): C2DClusterInfo[] { const clusters: C2DClusterInfo[] = [] @@ -358,10 +351,159 @@ function getC2DClusterEnvironment(isStartup?: boolean): C2DClusterInfo[] { ) } } + // docker clusters + const dockerConfig: C2DDockerConfig = { + socketPath: getEnvValue(process.env.DOCKER_SOCKET_PATH, null), + protocol: getEnvValue(process.env.DOCKER_PROTOCOL, null), + host: getEnvValue(process.env.DOCKER_HOST, null), + port: getIntEnvValue(process.env.DOCKER_PORT, 0), + caPath: getEnvValue(process.env.DOCKER_CA_PATH, null), + certPath: getEnvValue(process.env.DOCKER_CERT_PATH, null), + keyPath: getEnvValue(process.env.DOCKER_KEY_PATH, null), + environments: getDockerComputeEnvironments(isStartup) + } + + if (dockerConfig.socketPath || dockerConfig.host) { + const hash = create256Hash(JSON.stringify(dockerConfig)) + // get env values + dockerConfig.freeComputeOptions = getDockerFreeComputeOptions(hash, isStartup) + clusters.push({ + connection: dockerConfig, + hash, + type: C2DClusterType.DOCKER, + tempFolder: './c2d_storage/' + hash + }) + } return clusters } +// TODO C2D v2.0 +// eslint-disable-next-line no-unused-vars +function getDockerFreeComputeOptions( + clusterHash: string, + isStartup?: boolean +): ComputeEnvironment { + const defaultOptions: ComputeEnvironment = { + id: `${clusterHash}-free`, + cpuNumber: 1, + cpuType: '', + gpuNumber: 0, + ramGB: 1, + diskGB: 1, + priceMin: 0, + desc: 'Free', + currentJobs: 0, + maxJobs: 1, + consumerAddress: '', + storageExpiry: 600, + maxJobDuration: 600, // 10 minutes + feeToken: ZeroAddress, + chainId: 8996, + free: true, + platform: [{ architecture: os.machine(), os: os.platform() }] + } + + if (existsEnvironmentVariable(ENVIRONMENT_VARIABLES.DOCKER_FREE_COMPUTE, isStartup)) { + try { + const options: ComputeEnvironmentBaseConfig = JSON.parse( + process.env.DOCKER_FREE_COMPUTE + ) as ComputeEnvironmentBaseConfig + doComputeEnvChecks([options]) + const env = { ...options } as ComputeEnvironment + env.platform = [{ architecture: os.machine(), os: os.platform() }] + return env + } catch (error) { + CONFIG_LOGGER.logMessageWithEmoji( + `Invalid "${ENVIRONMENT_VARIABLES.DOCKER_FREE_COMPUTE.name}" env variable => ${process.env.DOCKER_FREE_COMPUTE}...`, + true, + GENERIC_EMOJIS.EMOJI_CROSS_MARK, + LOG_LEVELS_STR.LEVEL_ERROR + ) + } + } else { + CONFIG_LOGGER.warn( + `No options for ${ENVIRONMENT_VARIABLES.DOCKER_FREE_COMPUTE.name} were specified, using defaults.` + ) + } + return defaultOptions +} + +/** + * Reads a partial ComputeEnvironment setting (array of) + * @param isStartup 
for logging purposes + * @returns + * + * example: + * { + "cpuNumber": 2, + "ramGB": 4, + "diskGB": 10, + "desc": "2Cpu,4gbRam - price 1 OCEAN/minute, max 1 hour", + "maxJobs": 10, + "storageExpiry": 36000, + "maxJobDuration": 3600, + "chainId": 1, + "feeToken": "0x967da4048cD07aB37855c090aAF366e4ce1b9F48", + "priceMin": 1 + }, + */ +function getDockerComputeEnvironments(isStartup?: boolean): ComputeEnvironment[] { + if ( + existsEnvironmentVariable( + ENVIRONMENT_VARIABLES.DOCKER_COMPUTE_ENVIRONMENTS, + isStartup + ) + ) { + try { + const options: ComputeEnvironmentBaseConfig[] = JSON.parse( + process.env.DOCKER_COMPUTE_ENVIRONMENTS + ) as ComputeEnvironmentBaseConfig[] + doComputeEnvChecks(options) + const envs = [...options] as ComputeEnvironment[] + envs.forEach((env) => { + env.platform = [{ architecture: os.machine(), os: os.platform() }] + }) + return envs + } catch (error) { + CONFIG_LOGGER.logMessageWithEmoji( + `Invalid "${ENVIRONMENT_VARIABLES.DOCKER_COMPUTE_ENVIRONMENTS.name}" env variable => ${process.env.DOCKER_COMPUTE_ENVIRONMENTS}...`, + true, + GENERIC_EMOJIS.EMOJI_CROSS_MARK, + LOG_LEVELS_STR.LEVEL_ERROR + ) + } + } else { + CONFIG_LOGGER.warn( + `No options for ${ENVIRONMENT_VARIABLES.DOCKER_COMPUTE_ENVIRONMENTS.name} were specified.` + ) + } + return null +} + +function doComputeEnvChecks(configEnv: ComputeEnvironmentBaseConfig[]): boolean { + for (const config of configEnv) { + if (config.feeToken && !isDefined(config.priceMin)) { + CONFIG_LOGGER.error( + "Please check your compute env settings: We have a fee token but we don't have a price!" + ) + return false + } + if (isDefined(config.priceMin) && !isDefined(config.feeToken)) { + CONFIG_LOGGER.error( + "Please check your compute env settings: We have a price but we don't have a fee token!" + ) + return false + } + if (config.storageExpiry < config.maxJobDuration) { + CONFIG_LOGGER.error( + 'Please check your compute env settings: "storageExpiry" should be greater than "maxJobDuration"!'
+ ) + return false + } + } + return true +} + // connect interfaces (p2p or/and http) function getNodeInterfaces(isStartup: boolean = false) { let interfaces: string[] = ['P2P', 'HTTP'] @@ -605,7 +747,6 @@ async function getEnvConfig(isStartup?: boolean): Promise { indexingNetworks, feeStrategy: getOceanNodeFees(supportedNetworks, isStartup), c2dClusters: getC2DClusterEnvironment(isStartup), - dockerConfig: getC2DDockerConfig(isStartup), c2dNodeUri: getEnvValue(process.env.C2D_NODE_URI, ''), accountPurgatoryUrl: getEnvValue(process.env.ACCOUNT_PURGATORY_URL, ''), assetPurgatoryUrl: getEnvValue(process.env.ASSET_PURGATORY_URL, ''), @@ -637,6 +778,7 @@ function configChanged(previous: OceanNodeConfig, current: OceanNodeConfig): boo // useful for debugging purposes export async function printCurrentConfig() { const conf = await getConfiguration(true) + conf.keys.privateKey = '[*** HIDDEN CONTENT ***]' // hide private key console.log(JSON.stringify(conf, null, 4)) } diff --git a/src/utils/constants.ts b/src/utils/constants.ts index 2bf91f761..3a898351e 100644 --- a/src/utils/constants.ts +++ b/src/utils/constants.ts @@ -19,8 +19,10 @@ export const PROTOCOL_COMMANDS = { VALIDATE_DDO: 'validateDDO', COMPUTE_GET_ENVIRONMENTS: 'getComputeEnvironments', COMPUTE_START: 'startCompute', + FREE_COMPUTE_START: 'freeStartCompute', COMPUTE_STOP: 'stopCompute', COMPUTE_GET_STATUS: 'getComputeStatus', + COMPUTE_GET_STREAMABLE_LOGS: 'getComputeStreamableLogs', COMPUTE_GET_RESULT: 'getComputeResult', COMPUTE_INITIALIZE: 'initializeCompute', STOP_NODE: 'stopNode', @@ -48,9 +50,11 @@ export const SUPPORTED_PROTOCOL_COMMANDS: string[] = [ PROTOCOL_COMMANDS.VALIDATE_DDO, PROTOCOL_COMMANDS.COMPUTE_GET_ENVIRONMENTS, PROTOCOL_COMMANDS.COMPUTE_START, + PROTOCOL_COMMANDS.FREE_COMPUTE_START, PROTOCOL_COMMANDS.COMPUTE_STOP, PROTOCOL_COMMANDS.COMPUTE_GET_STATUS, PROTOCOL_COMMANDS.COMPUTE_GET_RESULT, + PROTOCOL_COMMANDS.COMPUTE_GET_STREAMABLE_LOGS, PROTOCOL_COMMANDS.COMPUTE_INITIALIZE, PROTOCOL_COMMANDS.STOP_NODE, PROTOCOL_COMMANDS.REINDEX_TX, @@ -318,6 +322,61 @@ export const ENVIRONMENT_VARIABLES: Record = { value: process.env.DB_TYPE, required: false }, + CRON_DELETE_DB_LOGS: { + name: 'CRON_DELETE_DB_LOGS', + value: process.env.CRON_DELETE_DB_LOGS, + required: false + }, + CRON_CLEANUP_C2D_STORAGE: { + name: 'CRON_CLEANUP_C2D_STORAGE', + value: process.env.CRON_CLEANUP_C2D_STORAGE, + required: false + }, + DOCKER_COMPUTE_ENVIRONMENTS: { + name: 'DOCKER_COMPUTE_ENVIRONMENTS', + value: process.env.DOCKER_COMPUTE_ENVIRONMENTS, + required: false + }, + DOCKER_FREE_COMPUTE: { + name: 'DOCKER_FREE_COMPUTE', + value: process.env.DOCKER_FREE_COMPUTE, + required: false + }, + DOCKER_SOCKET_PATH: { + name: 'DOCKER_SOCKET_PATH', + value: process.env.DOCKER_SOCKET_PATH, + required: false + }, + DOCKER_PROTOCOL: { + name: 'DOCKER_PROTOCOL', + value: process.env.DOCKER_PROTOCOL, + required: false + }, + DOCKER_HOST: { + name: 'DOCKER_HOST', + value: process.env.DOCKER_HOST, + required: false + }, + DOCKER_PORT: { + name: 'DOCKER_PORT', + value: process.env.DOCKER_PORT, + required: false + }, + DOCKER_CA_PATH: { + name: 'DOCKER_CA_PATH', + value: process.env.DOCKER_CA_PATH, + required: false + }, + DOCKER_CERT_PATH: { + name: 'DOCKER_CERT_PATH', + value: process.env.DOCKER_CERT_PATH, + required: false + }, + DOCKER_KEY_PATH: { + name: 'DOCKER_KEY_PATH', + value: process.env.DOCKER_KEY_PATH, + required: false + }, IS_BOOTSTRAP: { name: 'IS_BOOTSTRAP', value: process.env.IS_BOOTSTRAP,
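For context, a hedged example of how the new Docker/C2D variables might be combined in a node's .env file. The values are illustrative only: the JSON fields reuse the JSDoc sample above, the DOCKER_FREE_COMPUTE shape is an assumption based on the same ComputeEnvironmentBaseConfig type, and per doComputeEnvChecks feeToken and priceMin must be set together while storageExpiry must not be smaller than maxJobDuration:

DOCKER_SOCKET_PATH=/var/run/docker.sock
DOCKER_COMPUTE_ENVIRONMENTS=[{"cpuNumber":2,"ramGB":4,"diskGB":10,"desc":"2Cpu,4gbRam","maxJobs":10,"storageExpiry":36000,"maxJobDuration":3600,"chainId":8996,"feeToken":"0x967da4048cD07aB37855c090aAF366e4ce1b9F48","priceMin":1}]
DOCKER_FREE_COMPUTE={"cpuNumber":1,"ramGB":1,"diskGB":1,"desc":"Free","maxJobs":1,"storageExpiry":600,"maxJobDuration":600}
CRON_CLEANUP_C2D_STORAGE=*/5 * * * *

diff --git a/src/utils/cronjobs/scheduleCronJobs.ts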
b/src/utils/cronjobs/scheduleCronJobs.ts new file mode 100644 index 000000000..ceacf59ff --- /dev/null +++ b/src/utils/cronjobs/scheduleCronJobs.ts @@ -0,0 +1,48 @@ +// scheduleCronJobs.ts + +import { Database } from '../../components/database/index.js' +import { ENVIRONMENT_VARIABLES } from '../constants.js' +import { OCEAN_NODE_LOGGER } from '../logging/common.js' +import * as cron from 'node-cron' + +export function scheduleCronJobs(dbconn: Database | null) { + scheduleDeleteLogsJob(dbconn) + scheduleCleanExpiredC2DJobs(dbconn) +} + +function scheduleDeleteLogsJob(dbconn: Database | null) { + // Schedule the cron job to run daily at midnight + + if (dbconn && dbconn.logs) { + const expression = + process.env[ENVIRONMENT_VARIABLES.CRON_DELETE_DB_LOGS.name] || '0 0 * * *' + cron.schedule(expression, async () => { + const deletedLogsNum = await dbconn.logs.deleteOldLogs() + OCEAN_NODE_LOGGER.logMessage( + `${deletedLogsNum} old logs deleted successfully.`, + true + ) + }) + } else { + OCEAN_NODE_LOGGER.warn( + 'Logs CronJob: Database connection not established or logs instance not available (skipped).' + ) + } +} + +function scheduleCleanExpiredC2DJobs(dbconn: Database | null) { + // Schedule the cron job to run every 5 minutes or whatever specified + + if (dbconn && dbconn.c2d) { + const expression = + process.env[ENVIRONMENT_VARIABLES.CRON_CLEANUP_C2D_STORAGE.name] || '*/5 * * * *' + cron.schedule(expression, async () => { + const deleted = await dbconn.c2d.cleanStorageExpiredJobs() + OCEAN_NODE_LOGGER.info(`${deleted} old C2D jobs cleaned successfully.`) + }) + } else { + OCEAN_NODE_LOGGER.warn( + 'C2D CronJob: Database connection not established or C2D instance not available (skipped).' + ) + } +} diff --git a/src/utils/logging/logDeleteCron.ts b/src/utils/logging/logDeleteCron.ts deleted file mode 100644 index 80bc7d49a..000000000 --- a/src/utils/logging/logDeleteCron.ts +++ /dev/null @@ -1,23 +0,0 @@ -// scheduleCronJobs.ts - -import { Database } from '../../components/database/index.js' -import { OCEAN_NODE_LOGGER } from './common.js' -import * as cron from 'node-cron' - -export function scheduleCronJobs(dbconn: Database | null) { - // Schedule the cron job to run daily at midnight - cron.schedule('0 0 * * *', async () => { - if (dbconn && dbconn.logs) { - const deletedLogsNum = await dbconn.logs.deleteOldLogs() - OCEAN_NODE_LOGGER.logMessage( - `${deletedLogsNum} old logs deleted successfully.`, - true - ) - } else { - OCEAN_NODE_LOGGER.logMessage( - 'Database connection not established or logs instance not available.', - true - ) - } - }) -} diff --git a/src/utils/util.ts b/src/utils/util.ts index e16351c7f..a6c4b1339 100644 --- a/src/utils/util.ts +++ b/src/utils/util.ts @@ -145,3 +145,22 @@ export function asyncCallWithTimeout( export function isDefined(something: any): boolean { return something !== undefined && something !== null } + +export function deleteKeysFromObject(source: any, keys: string[]): any { + keys.forEach((keyName) => { + if (keyName in source) { + delete source[keyName] + } + }) + return source +} + +export function convertGigabytesToBytes(gigabytes: number): number { + if (gigabytes < 0) { + throw new Error('Input must be a non-negative number') + } + + const bytesInAGigabyte = 1024 ** 3 // 1 gigabyte = 1024^3 bytes + const bytes = gigabytes * bytesInAGigabyte + return bytes +}
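Finally, a short usage sketch for the two helpers added to src/utils/util.ts. The byte figure follows directly from the 1024 ** 3 factor above; the job-object fragment and the diskGB use case are illustrative assumptions, not taken from the node's code:

import { deleteKeysFromObject, convertGigabytesToBytes } from './util.js'

// strip internal-only fields before exposing a job, in the spirit of
// omitDBComputeFieldsFromComputeJob hiding DB bookkeeping from API consumers
const publicJob = deleteKeysFromObject(
  { jobId: '0xabc', owner: '0x123', clusterHash: 'internal-only' },
  ['clusterHash']
)
console.log(publicJob) // { jobId: '0xabc', owner: '0x123' }

// 1 GB expressed in bytes, e.g. for turning a ComputeEnvironment's
// diskGB setting into an absolute byte limit
console.log(convertGigabytesToBytes(1)) // 1073741824 (1024 ** 3)

Note that deleteKeysFromObject mutates (and returns) the object passed in, so callers holding the original reference will see the keys removed there too.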