diff --git a/.env.example b/.env.example index bd22b14b5..8c751a648 100644 --- a/.env.example +++ b/.env.example @@ -35,11 +35,9 @@ INGESTER_RPC_HOST='https://rpc:port' INGESTER_BACKFILLER_SOURCE_MODE=RPC #RPC or Bigtable INGESTER_BIG_TABLE_CONFIG='{creds="/usr/src/app/creds.json", timeout=1000}' -INGESTER_DISABLE_SYNCHRONIZER=true INGESTER_RUN_SEQUENCE_CONSISTENT_CHECKER=true # Optional, required only if it needs to run fork cleaner, default is false. Unstable as it removes forked items, but also removes some valid leafs. Recommended to use only! for testing purposes. INGESTER_RUN_FORK_CLEANER=false -INGESTER_RUN_DUMP_SYNCHRONIZE_ON_START=false INGESTER_RUN_BUBBLEGUM_BACKFILLER=true INGESTER_BACKFILLER_MODE=PersistAndIngest diff --git a/Cargo.lock b/Cargo.lock index 2ec45e2e1..09ed22787 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1217,7 +1217,11 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", "sync_wrapper", + "tokio", "tower", "tower-layer", "tower-service", @@ -1262,7 +1266,7 @@ version = "0.1.0" dependencies = [ "async-trait", "entities", - "flatbuffers", + "flatbuffers 23.5.26", "futures", "interface", "plerkle_serialization", @@ -1649,7 +1653,7 @@ dependencies = [ "anchor-lang 0.29.0", "async-trait", "blockbuster", - "borsh 0.9.3", + "borsh 0.10.3", "bytemuck", "chrono", "mpl-bubblegum", @@ -1719,9 +1723,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" [[package]] name = "bzip2" @@ -2920,6 +2924,17 @@ dependencies = [ "rustc_version 0.4.0", ] +[[package]] +name = "flatbuffers" +version = "24.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "8add37afff2d4ffa83bc748a70b4b1370984f6980768554182424ef71447c35f" +dependencies = [ + "bitflags 1.3.2", + "rustc_version 0.4.0", + "serde", +] + [[package]] name = "flate2" version = "1.0.30" @@ -2966,6 +2981,21 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" +[[package]] +name = "function_name" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1ab577a896d09940b5fe12ec5ae71f9d8211fff62c919c03a3750a9901e98a7" +dependencies = [ + "function_name-proc-macro", +] + +[[package]] +name = "function_name-proc-macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673464e1e314dd67a0fd9544abc99e8eb28d0c7e3b69b033bcff9b2d00b87333" + [[package]] name = "futures" version = "0.3.30" @@ -3679,6 +3709,19 @@ dependencies = [ "generic-array", ] +[[package]] +name = "insta" +version = "1.41.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9ffc4d4892617c50a928c52b2961cb5174b6fc6ebf252b2fac9d21955c48b8" +dependencies = [ + "console", + "lazy_static", + "linked-hash-map", + "serde", + "similar", +] + [[package]] name = "instant" version = "0.1.13" @@ -3688,6 +3731,42 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "integration_tests" +version = "0.1.0" +dependencies = [ + "anyhow", + "borsh 0.10.3", + "entities", + "flatbuffers 23.5.26", + "function_name", + "futures", + "insta", + "itertools 0.12.1", + "metrics-utils", + "mpl-token-metadata", + "nft_ingester", + "once_cell", + "plerkle_serialization", + "postgre-client", + "rocks-db", + "rocksdb", + "serde", + "serde_json", + "serial_test", + "solana-account-decoder", + "solana-client", + "solana-sdk", + "solana-transaction-status", + "spl-token 4.0.0", + "sqlx", + "tempfile", + "tokio", + "tokio-stream", + "tracing", + "usecase", +] + [[package]] name = 
"integrity-verification" version = "0.1.0" @@ -4116,6 +4195,12 @@ dependencies = [ "thiserror", ] +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -4536,6 +4621,7 @@ dependencies = [ "assertables", "async-channel 2.3.1", "async-trait", + "axum 0.6.20", "backfill-rpc", "base64 0.21.7", "bincode", @@ -4550,11 +4636,12 @@ dependencies = [ "entities", "env_logger 0.10.2", "figment", - "flatbuffers", + "flatbuffers 23.5.26", "futures", "grpc", "heck 0.5.0", "hex", + "indicatif", "interface", "itertools 0.12.1", "jemallocator", @@ -4581,6 +4668,7 @@ dependencies = [ "rand 0.8.5", "reqwest", "rocks-db", + "rocksdb", "schemars", "serde", "serde_cbor", @@ -4604,6 +4692,7 @@ dependencies = [ "testcontainers", "thiserror", "tokio", + "tokio-retry", "tokio-util 0.7.11", "tonic 0.10.2", "tracing", @@ -5272,7 +5361,7 @@ checksum = "69341a546676367be06201860e72bc7ebb8f228e7dcd1999aae715207ca0a816" dependencies = [ "bs58 0.4.0", "chrono", - "flatbuffers", + "flatbuffers 23.5.26", "serde", "solana-sdk", "solana-transaction-status", @@ -6249,10 +6338,12 @@ dependencies = [ "csv", "entities", "figment", + "flatbuffers 24.3.25", "futures-util", "hex", "indicatif", "interface", + "itertools 0.12.1", "lz4", "metrics-utils", "mockall", @@ -6673,6 +6764,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_spanned" version = "0.6.6" @@ -6781,16 +6882,43 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "serial_test" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" +dependencies = [ + "dashmap", + "futures", + "lazy_static", + "log", + "parking_lot 0.12.3", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "setup" version = "0.1.0" dependencies = [ "entities", + "flatbuffers 24.3.25", "metrics-utils", "nft_ingester", "postgre-client", "rand 0.8.5", "rocks-db", + "rocksdb", "solana-sdk", "sqlx", "tempfile", @@ -6955,6 +7083,12 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +[[package]] +name = "similar" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" + [[package]] name = "simpl" version = "0.1.0" @@ -9027,6 +9161,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-retry" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" +dependencies = [ + "pin-project", + "rand 0.8.5", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.23.4" @@ -9340,15 +9485,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -9691,7 +9836,7 @@ dependencies = [ "anyhow", "bincode", "bs58 0.4.0", - "flatbuffers", + "flatbuffers 23.5.26", "log", "parking_lot 0.12.3", "serde", diff --git a/Cargo.toml b/Cargo.toml index 023e513e5..aedc5dc8a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,14 +11,17 @@ members = [ "usecase", "tests/setup", "backfill_rpc", - "integrity_verification" + "integrity_verification", + "integration_tests" ] [workspace.dependencies] + # Tokio ecosystem tokio = { version = "1.37.0", features = ["full", "tracing"] } tokio-stream = "0.1.11" tokio-util = { version = "0.7.10", features = ["codec", "compat"] } +tokio-retry = "0.3.0" # Serde ecosystem and seryalization tools @@ -30,6 +33,8 @@ serde_with = "3.7.0" flatbuffers = "23.1.21" plerkle_serialization = "1.9.0" plerkle_messenger = { version = "1.9.0", features = ['redis'] } +borsh = "~0.10.3" +borsh-derive = "~0.10.3" # Database infrastructure @@ -54,6 +59,9 @@ tracing-test = { version = "0.2.4", features = ["no-env-filter"] } criterion = "0.5.1" prometheus-client = "0.21.2" pprof = { version = "0.13.0", features = ["_protobuf", "protobuf-codec"] } +function_name = "0.3.0" +insta = "1.34.0" +serial_test = "2.0.0" # HTTP, RPC and Schemas @@ -65,6 +73,7 @@ tonic-build = "0.10.2" prost = "0.12.3" jsonrpc-core = { version = "19.0.0", git = "https://github.com/RequescoS/jsonrpc-response-middleware.git" } open-rpc-schema = { version = "0.0.4" } +axum = "0.6" # Traits @@ -82,6 +91,7 @@ indicatif = "0.17" # Errors, futures, helpers, tools, time, etc... 
# Errors thiserror = { version = "1"} +anyhow = "1.0.75" # Clients arweave-rs = { version = "0.2.0", git = "https://github.com/RequescoS/arweave-rs.git", rev = "d8f5ef76f06c96afdf013fe5b62301790631b33f" } reqwest = { version = "0.11.23", features = ["json", "stream"] } @@ -145,6 +155,7 @@ solana-bigtable-connection = "1.10.31" spl-token = { version = "4.0.0", features = ["no-entrypoint"] } spl-token-2022 = { version = "1.0", features = ["no-entrypoint"] } spl-pod = "0.1.0" +solana-account-decoder = "~1.18.15" # Anchor ecosystem anchor-lang = "=0.30.1" diff --git a/Makefile b/Makefile index 8ecf5ae55..a06182d34 100644 --- a/Makefile +++ b/Makefile @@ -3,11 +3,14 @@ SHELL := /bin/bash build: - @docker compose -f docker-compose.yaml build ingester raw-backfiller das-api synchronizer core-indexing + @docker compose -f docker-compose.yaml build ingester raw-backfiller das-api synchronizer core-indexing slot-persister start: @docker compose -f docker-compose.yaml up -d ingester +start-slot-persister: + @docker compose -f docker-compose.yaml up -d slot-persister + start-synchronizer: @docker compose -f docker-compose.yaml up -d synchronizer diff --git a/backfill_rpc/src/slots_collector.rs b/backfill_rpc/src/slots_collector.rs index afbbe6b2b..6699e4232 100644 --- a/backfill_rpc/src/slots_collector.rs +++ b/backfill_rpc/src/slots_collector.rs @@ -13,7 +13,7 @@ const TRY_SKIPPED_BLOCKS_COUNT: u64 = 25; #[async_trait] impl SlotsGetter for BackfillRPC { - async fn get_slots( + async fn get_slots_sorted_desc( &self, collected_key: &solana_program::pubkey::Pubkey, start_at: u64, @@ -37,13 +37,16 @@ impl SlotsGetter for BackfillRPC { slots.insert(sig.slot); } if slots.len() >= rows_limit as usize { - return Ok(Vec::from_iter(slots)); + let mut slots = slots.into_iter().collect::>(); + slots.sort_unstable_by(|a, b| b.cmp(a)); + return Ok(slots); } } before = Some(last.signature); } - - Ok(Vec::from_iter(slots)) + let mut slots = slots.into_iter().collect::>(); + 
slots.sort_unstable_by(|a, b| b.cmp(a)); + Ok(slots) } } diff --git a/debug_tools/gpa_tool/README.md b/debug_tools/gpa_tool/README.md new file mode 100644 index 000000000..afe57d59e --- /dev/null +++ b/debug_tools/gpa_tool/README.md @@ -0,0 +1,89 @@ +# GPA tool + +## Script Description + +This script interacts with the Metaplex Core program on the SVM blockchains to retrieve assets associated with a specific collection and process a CSV file containing hexadecimal keys. It converts these keys to Base58 and identifies keys that are missing from the `assets_v3.csv` file. The result is saved to a new CSV file. + +--- + +## Prerequisites + +### Node.js and npm +Ensure you have **Node.js** installed (v14 or higher recommended). + +### Install Dependencies +Run the following command to install the required packages: + +```bash +npm install @metaplex-foundation/umi-bundle-defaults @metaplex-foundation/mpl-core @metaplex-foundation/umi bs58 fs +``` + +### Set Up an SVM RPC Node +The script uses the RPC endpoint `https://api.mainnet-beta.solana.com`. You can replace this with your preferred RPC endpoint if needed. + +--- + +## How to Use + +1. **Set Up the Collection Public Key** + Replace the placeholder public key `C8uYT2W93pBcmMVxSoUyzTW5mKVFTEpMNEPx1Y15MFyk` with the public key of the Core collection you want to query. + +2. **Prepare the CSV File** + - Create a CSV file named `assets_v3.csv` in the root directory. + - This file can be created by selecting keys from the PG. Here is an SQL request to select asset keys from the collection referenced above: `select ast_pubkey from assets_v3 where ast_collection = decode('a57708125d64ff943f1adf2fa45bfb7c0d8e581d6f3d036d6e41d64cd70434f3', 'HEX');` + - The file must include a header row with `ast_pubkey` as the column name. + - Each subsequent row should contain hexadecimal-encoded keys (e.g., `0x1234abcd`). + +3. **Run the Script** + Execute the script with: + + ```bash + ts-node app.ts + ``` + +4.
**Output** + - The number of assets in the collection is printed to the console. + - Missing keys (those not found in the PG data) are written to `absentKeys.csv`. + +--- + +## Functionality Overview + +### `main` +1. Connects to the SVM RPC. +2. Fetches all assets associated with the specified collection key. +3. Extracts public keys of the assets. + +### `processCsvToBase58` +1. Reads the `assets_v3.csv` file. +2. Validates the file format. +3. Converts hexadecimal keys from the file to Base58. +4. Compares these keys with the blockchain data. +5. Writes absent keys to `absentKeys.csv`. + +--- + +## Example + +### Input: `assets_v3.csv` + +```csv +ast_pubkey +0x1234abcd +0x5678ef01 +``` + +### Output: `absentKeys.csv` + +```csv +base58_key +3QJmV3qfvL9SuYo34YihAfMZhD2xBn84cvTL9W5ddWKH +``` + +--- + +## Notes +- Update the RPC endpoint and collection key as needed. +- Ensure you have sufficient permissions to read/write files in the directory. + +Feel free to extend the script for additional functionalities or integrate it into a larger SVM application.
\ No newline at end of file diff --git a/debug_tools/gpa_tool/app.ts b/debug_tools/gpa_tool/app.ts new file mode 100644 index 000000000..646c018ab --- /dev/null +++ b/debug_tools/gpa_tool/app.ts @@ -0,0 +1,95 @@ +import { createUmi } from '@metaplex-foundation/umi-bundle-defaults' +import { + mplCore, + getAssetV1GpaBuilder, + Key, + updateAuthority, +} from '@metaplex-foundation/mpl-core' +import { generateSigner, publicKey, signerIdentity } from '@metaplex-foundation/umi'; +import * as fs from 'fs'; +import bs58 from 'bs58'; + +const umi = createUmi('https://api.mainnet-beta.solana.com', 'processed').use(mplCore()) +const payer = generateSigner(umi); + +umi.use(signerIdentity(payer)); + +async function main() { + const collectionKey = publicKey("C8uYT2W93pBcmMVxSoUyzTW5mKVFTEpMNEPx1Y15MFyk"); + + const assetsByCollection = await getAssetV1GpaBuilder(umi) + .whereField('key', Key.AssetV1) + .whereField( + 'updateAuthority', + updateAuthority('Collection', [collectionKey]) + ) + .getDeserialized(); + + const publicKeyMap: string[] = []; + + for (const element of assetsByCollection) { + publicKeyMap.push(element.publicKey.toString()); + } + + console.log(assetsByCollection.length); + + await processCsvToBase58("./assets_v3.csv", publicKeyMap); + +} + +async function processCsvToBase58(filePath: string, keysFromTheNetwork: string[]): Promise { + try { + if (!fs.existsSync(filePath)) { + console.error('File does not exist:', filePath); + return; + } + + const csvData = fs.readFileSync(filePath, 'utf-8'); + const rows = csvData.split('\n').filter(row => row.trim() !== ''); + + const header = rows[0].trim(); + if (header !== 'ast_pubkey') { + console.error('Invalid CSV format. 
Expected header: "ast_pubkey".'); + return; + } + + const keyFromTheDB: string[] = []; + + const hexValues = rows.slice(1); + hexValues.forEach((hex, index) => { + try { + let trimmedHex = hex.trim(); + + if (trimmedHex.startsWith('0x')) { + trimmedHex = trimmedHex.slice(2); + } + + if (!/^([0-9A-Fa-f]+)$/.test(trimmedHex)) { + console.warn(`Invalid HEX value at row ${index + 2}:`, hex); + return; + } + + const buffer = Buffer.from(trimmedHex, 'hex'); + const base58 = bs58.encode(buffer); + keyFromTheDB.push(base58); + } catch (error) { + console.error(`Error processing row ${index + 2}:`, error); + } + }); + + const absentKeys = keysFromTheNetwork.filter(key => !keyFromTheDB.includes(key)); + + const h = 'base58_key\n'; + + const r = absentKeys.map(key => key).join('\n'); + + const csvContent = h + r; + + fs.writeFileSync("./absentKeys.csv", csvContent, 'utf-8'); + } catch (error) { + console.error('Error reading or processing the file:', error); + } +} + + +main().catch(console.error); diff --git a/debug_tools/gpa_tool/package.json b/debug_tools/gpa_tool/package.json new file mode 100644 index 000000000..afb02266c --- /dev/null +++ b/debug_tools/gpa_tool/package.json @@ -0,0 +1,22 @@ +{ + "name": "core-demo", + "packageManager": "yarn@4.1.1", + "dependencies": { + "@metaplex-foundation/mpl-core": "^1.1.1", + "@metaplex-foundation/mpl-inscription": "^0.8.1", + "@metaplex-foundation/umi": "^0.9.2", + "@metaplex-foundation/umi-bundle-defaults": "^0.9.2", + "@solana/web3.js": "^1.95.8", + "bs58": "^6.0.0", + "fs": "^0.0.1-security" + }, + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [], + "author": "", + "license": "ISC" +} diff --git a/docker-compose.yaml b/docker-compose.yaml index a3691eef1..e94ad889b 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -3,7 +3,7 @@ services: ingester: container_name: ingester restart: always - entrypoint: sh -c "if 
[ -z '$$MALLOC_CONF' ]; then exec ./ingester; else exec ./profiling_ingester; fi" + entrypoint: sh -c "if [ -z '$$MALLOC_CONF' ]; then exec ./ingester --slots-db-path $$INGESTER_SLOTS_DB_PATH --secondary-slots-db-path $$INGESTER_SECONDARY_SLOTS_DB_PATH; else exec ./profiling_ingester --slots-db-path $$INGESTER_SLOTS_DB_PATH --secondary-slots-db-path $$INGESTER_SECONDARY_SLOTS_DB_PATH; fi" env_file: - .env network_mode: host @@ -15,6 +15,8 @@ services: - ${INGESTER_PROFILING_FILE_PATH}:${INGESTER_PROFILING_FILE_PATH_CONTAINER}:rw - ${INGESTER_FILE_STORAGE_PATH}:${INGESTER_FILE_STORAGE_PATH_CONTAINER}:rw - ${INGESTER_MIGRATION_STORAGE_PATH}:${INGESTER_MIGRATION_STORAGE_PATH}:rw + - ${INGESTER_SLOTS_DB_PATH}:${INGESTER_SLOTS_DB_PATH}:ro + - ${INGESTER_SECONDARY_SLOTS_DB_PATH}:${INGESTER_SECONDARY_SLOTS_DB_PATH}:rw - ./creds.json:/usr/src/app/creds.json - ./migrations:/usr/src/app/migrations - ./arweave_wallet.json:/usr/src/app/arweave_wallet.json @@ -115,6 +117,41 @@ services: options: max-size: "2048m" + slot-persister: + container_name: slot-persister + restart: always + entrypoint: | + sh -c " + ARGS=\"--target-db-path $target_db_path\" + ARGS=\"$$ARGS --rpc-host $rpc_host\" + [ -n \"$start_slot\" ] && ARGS=\"$$ARGS --start-slot $start_slot\" + [ -n \"$big_table_credentials\" ] && ARGS=\"$$ARGS --big-table-credentials $big_table_credentials\" + [ -n \"$big_table_timeout\" ] && ARGS=\"$$ARGS --big-table-timeout $big_table_timeout\" + [ -n \"$metrics_port\" ] && ARGS=\"$$ARGS --metrics-port $metrics_port\" + [ -n \"$chunk_size\" ] && ARGS=\"$$ARGS --chunk-size $chunk_size\" + [ -n \"$max_concurrency\" ] && ARGS=\"$$ARGS --max-concurrency $max_concurrency\" + + if [ -z \"$MALLOC_CONF\" ]; then + exec ./slot_persister $$ARGS + else + exec ./profiling_slot_persister $$ARGS + fi" + env_file: + - .env + network_mode: host + volumes: + - ${target_db_path}:${target_db_path}:rw + - ${INGESTER_PROFILING_FILE_PATH}:${INGESTER_PROFILING_FILE_PATH_CONTAINER}:rw + - 
${big_table_credentials:-/tmp/creds.json}:${big_table_credentials:-/tmp/creds.json} + - ./heaps:/usr/src/app/heaps:rw + stop_grace_period: 5m + build: + context: . + dockerfile: ingester.Dockerfile + logging: + options: + max-size: "2048m" + core-indexing: container_name: core-indexing restart: always diff --git a/entities/src/api_req_params.rs b/entities/src/api_req_params.rs index 3e57f42e7..63e02f4ce 100644 --- a/entities/src/api_req_params.rs +++ b/entities/src/api_req_params.rs @@ -135,7 +135,8 @@ pub struct GetAssetsByGroup { pub before: Option, pub after: Option, pub cursor: Option, - pub options: Option, + #[serde(default)] + pub options: GetByMethodsOptions, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] @@ -148,21 +149,24 @@ pub struct GetAssetsByOwner { pub before: Option, pub after: Option, pub cursor: Option, - pub options: Option, + #[serde(default)] + pub options: GetByMethodsOptions, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(deny_unknown_fields, rename_all = "camelCase")] pub struct GetAsset { pub id: String, - pub options: Option, + #[serde(default)] + pub options: Options, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(deny_unknown_fields, rename_all = "camelCase")] pub struct GetAssetBatch { pub ids: Vec, - pub options: Option, + #[serde(default)] + pub options: Options, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] @@ -188,7 +192,8 @@ pub struct GetAssetsByCreator { pub before: Option, pub after: Option, pub cursor: Option, - pub options: Option, + #[serde(default)] + pub options: GetByMethodsOptions, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] @@ -201,7 +206,8 @@ pub struct GetAssetsByAuthority { pub before: Option, pub after: Option, pub cursor: Option, - pub options: Option, + #[serde(default)] + pub options: GetByMethodsOptions, } #[derive(Debug, Clone, PartialEq, Eq, 
Serialize, Deserialize, JsonSchema)] @@ -287,7 +293,8 @@ pub struct SearchAssets { pub cursor: Option, #[serde(default)] pub name: Option, - pub options: Option, + #[serde(default)] + pub options: SearchAssetsOptions, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema, Default)] @@ -412,10 +419,10 @@ impl From for SearchAssets { json_uri: value.json_uri, cursor: None, name: None, - options: Some(SearchAssetsOptions { + options: SearchAssetsOptions { show_unverified_collections: true, ..Default::default() - }), + }, } } } @@ -430,12 +437,12 @@ impl From for GetAsset { fn from(value: GetAssetV0) -> Self { Self { id: value.id, - options: Some(Options { + options: Options { show_unverified_collections: true, show_collection_metadata: false, show_inscription: false, show_fungible: false, - }), + }, } } } @@ -450,12 +457,12 @@ impl From for GetAssetBatch { fn from(value: GetAssetBatchV0) -> Self { Self { ids: value.ids, - options: Some(Options { + options: Options { show_unverified_collections: true, show_collection_metadata: false, show_inscription: false, show_fungible: false, - }), + }, } } } @@ -481,10 +488,10 @@ impl From for GetAssetsByAuthority { before: value.before, after: value.after, cursor: None, - options: Some(GetByMethodsOptions { + options: GetByMethodsOptions { show_unverified_collections: true, ..Default::default() - }), + }, } } } @@ -512,10 +519,10 @@ impl From for GetAssetsByCreator { before: value.before, after: value.after, cursor: None, - options: Some(GetByMethodsOptions { + options: GetByMethodsOptions { show_unverified_collections: true, ..Default::default() - }), + }, } } } @@ -541,10 +548,10 @@ impl From for GetAssetsByOwner { before: value.before, after: value.after, cursor: None, - options: Some(GetByMethodsOptions { + options: GetByMethodsOptions { show_unverified_collections: true, ..Default::default() - }), + }, } } } @@ -572,7 +579,7 @@ impl From for GetAssetsByGroup { before: value.before, after: value.after, 
cursor: None, - options: None, + options: Default::default(), } } } diff --git a/entities/src/enums.rs b/entities/src/enums.rs index bc7f3eeec..29f843eaa 100644 --- a/entities/src/enums.rs +++ b/entities/src/enums.rs @@ -33,9 +33,13 @@ pub enum SpecificationAssetClass { Nft, PrintableNft, ProgrammableNft, + // legacy, doesn't exist Print, + // legacy, doesn't exist TransferRestrictedNft, + // legacy, doesn't exist NonTransferableNft, + // legacy, doesn't exist IdentityNft, MplCoreAsset, MplCoreCollection, @@ -110,7 +114,7 @@ impl From for UseMethod { } } -#[derive(Serialize, Deserialize, Debug, Clone, Default, Copy)] +#[derive(Serialize, Deserialize, Debug, Clone, Default, Copy, PartialEq)] pub enum ChainMutability { // Original implementation also contain "Unknown" // enum variant, which is default. But we do not saved any diff --git a/entities/src/models.rs b/entities/src/models.rs index 6ad3a1090..170f98f01 100644 --- a/entities/src/models.rs +++ b/entities/src/models.rs @@ -33,11 +33,12 @@ impl UrlWithStatus { } } pub fn get_metadata_id(&self) -> Vec { + Self::get_metadata_id_for(&self.metadata_url) + } + + pub fn get_metadata_id_for(url: &str) -> Vec { let mut hasher = Sha256::new(); - // triming the url to remove any leading or trailing whitespaces, - // as some of the legacy versions of the database have contained the urls with whitespaces - let url = self.metadata_url.trim(); - hasher.update(url); + hasher.update(url.trim()); hasher.finalize().to_vec() } } @@ -127,7 +128,7 @@ pub struct CompleteAssetDetails { pub supply: Option>, pub seq: Option>, pub is_burnt: Updated, - pub was_decompressed: Updated, + pub was_decompressed: Option>, pub onchain_data: Option>, pub creators: Updated>, pub royalty_amount: Updated, @@ -155,6 +156,8 @@ pub struct CompleteAssetDetails { pub delegate: Updated>, pub owner_type: Updated, pub owner_delegate_seq: Updated>, + pub is_current_owner: Updated, + pub owner_record_pubkey: Pubkey, // Separate fields pub asset_leaf: Option>, 
@@ -626,6 +629,11 @@ pub struct SplMint { pub slot_updated: i64, pub write_version: u64, } +impl SplMint { + pub fn is_nft(&self) -> bool { + self.supply == 1 && self.decimals == 0 + } +} impl From<&Mint> for SplMint { fn from(value: &Mint) -> Self { diff --git a/grpc/src/mapper.rs b/grpc/src/mapper.rs index 12dc95cb9..4ce799624 100644 --- a/grpc/src/mapper.rs +++ b/grpc/src/mapper.rs @@ -47,7 +47,7 @@ impl From for AssetDetails { supply: value.supply.map(|v| v.into()), seq: value.seq.map(|v| v.into()), is_burnt: Some(value.is_burnt.into()), - was_decompressed: Some(value.was_decompressed.into()), + was_decompressed: value.was_decompressed.map(Into::into), creators: Some(value.creators.into()), royalty_amount: Some(value.royalty_amount.into()), authority: Some(value.authority.into()), @@ -138,8 +138,7 @@ impl TryFrom for CompleteAssetDetails { .ok_or(GrpcError::MissingField("is_burnt".to_string()))?, was_decompressed: value .was_decompressed - .map(Into::into) - .ok_or(GrpcError::MissingField("was_decompressed".to_string()))?, + .map(Into::into), creators: value .creators .map(TryInto::try_into) @@ -191,6 +190,8 @@ impl TryFrom for CompleteAssetDetails { .transpose()? .ok_or(GrpcError::MissingField("owner_type".to_string()))?, owner_delegate_seq, + is_current_owner: todo!(), + owner_record_pubkey: todo!(), asset_leaf: value.asset_leaf.map(TryInto::try_into).transpose()?, collection: value.collection.map(TryInto::try_into).transpose()?, onchain_data: value.chain_data.map(TryInto::try_into).transpose()?, diff --git a/ingester.Dockerfile b/ingester.Dockerfile index 76a8402a9..249ee2da7 100644 --- a/ingester.Dockerfile +++ b/ingester.Dockerfile @@ -36,12 +36,12 @@ RUN cargo chef cook --release --recipe-path recipe.json # Building the services FROM cacher AS builder COPY . . 
-RUN cargo build --release --bin ingester --bin api --bin raw_backfiller --bin synchronizer +RUN cargo build --release --bin ingester --bin api --bin raw_backfiller --bin synchronizer --bin slot_persister # Building the profiling feature services FROM cacher AS builder-with-profiling COPY . . -RUN cargo build --release --features profiling --bin ingester --bin api --bin raw_backfiller --bin synchronizer +RUN cargo build --release --features profiling --bin ingester --bin api --bin raw_backfiller --bin synchronizer --bin slot_persister # Final image FROM rust:1.76-slim-bullseye AS runtime @@ -55,10 +55,12 @@ COPY --from=builder /rust/target/release/ingester ${APP}/ingester COPY --from=builder /rust/target/release/raw_backfiller ${APP}/raw_backfiller COPY --from=builder /rust/target/release/api ${APP}/api COPY --from=builder /rust/target/release/synchronizer ${APP}/synchronizer +COPY --from=builder /rust/target/release/slot_persister ${APP}/slot_persister COPY --from=builder-with-profiling /rust/target/release/ingester ${APP}/profiling_ingester COPY --from=builder-with-profiling /rust/target/release/raw_backfiller ${APP}/profiling_raw_backfiller COPY --from=builder-with-profiling /rust/target/release/api ${APP}/profiling_api COPY --from=builder-with-profiling /rust/target/release/synchronizer ${APP}/profiling_synchronizer +COPY --from=builder-with-profiling /rust/target/release/slot_persister ${APP}/profiling_slot_persister WORKDIR ${APP} STOPSIGNAL SIGINT \ No newline at end of file diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml new file mode 100644 index 000000000..a7589d323 --- /dev/null +++ b/integration_tests/Cargo.toml @@ -0,0 +1,38 @@ +[package] +edition = "2021" +name = "integration_tests" +publish = false +version = "0.1.0" + +[dependencies] +anyhow = { workspace = true } +borsh = { workspace = true } +flatbuffers = { workspace = true } +function_name = { workspace = true } +futures = { workspace = true } +insta = { workspace = 
true, features = ["json"] } +itertools = { workspace = true } +tracing = { workspace = true } +mpl-token-metadata = { workspace = true } +once_cell = { workspace = true } +plerkle_serialization = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serial_test = { workspace = true } +solana-account-decoder = { workspace = true } +solana-client = { workspace = true } +solana-sdk = { workspace = true } +solana-transaction-status = { workspace = true } +spl-token = { workspace = true, features = ["no-entrypoint"] } +sqlx = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +rocksdb = { workspace = true } +tempfile = {workspace = true} + +nft_ingester = { path = "../nft_ingester" } +postgre-client = { path = "../postgre-client", features = ["integration_tests"] } +metrics-utils = { path = "../metrics_utils" } +rocks-db = { path = "../rocks-db", features = ["integration_tests"] } +usecase = { path = "../usecase" } +entities = { path = "../entities" } diff --git a/integration_tests/README.md b/integration_tests/README.md new file mode 100644 index 000000000..1b533abe5 --- /dev/null +++ b/integration_tests/README.md @@ -0,0 +1,38 @@ +# Integration Tests + +> [!NOTE] +> These tests were taken from the reference DAS API implementation, you can find original code in [this repository](https://github.com/metaplex-foundation/digital-asset-rpc-infrastructure). + +This Cargo package helps us run multi-package tests in our workspace. This setup is inspired by the tokio integration test setup. + +## Setup + +First setup a local Postgres database and export the postgres database URL as follows: +```export DATABASE_TEST_URL=postgres://postgres@localhost/``` + +Also gain access to mainnet RPCs and devnet RPCs and export the URLs as follows. Currently, +we only use these URLs for reading data and storing it locally. + +``` +export DEVNET_RPC_URL=... +export MAINNET_RPC_URL=... 
+``` + +Afterwards, you can simply run the following command to run tests: +```cargo test``` + +The tests run migrations from scratch and populate the data. You don't need any other setup. + +## How do tests work? + +Most tests currently are configured to run as "scenario" tests. They pull test input data from mainnet/devnet +and store it locally to avoid tests breaking if mainnet/devnet data ever changes. Afterwards, they feed +the tests to the `handle_account_update` and `handle_transaction` functions of the ingester and populate +the indexed data in the database. Finally, they create an instance of the `DasApi` struct, run queries against +this struct, store the results of these queries as snapshots through the `insta` testing library and assert that +future runs of the same test produce the same snapshot. + +Note that tests do not actually run the ingester binaries and the API binaries and only test the primary internal functions +of each, e.g. `handle_account_update` and `handle_transaction` for the ingester and functions like `search_assets` +and `get_asset` for the binary. By following this approach, we are able to test the vast majority of the code +in a way that's easy to set up and fast to run -- avoiding having to compile and run multiple binaries. 
diff --git a/integration_tests/src/account_update_tests.rs b/integration_tests/src/account_update_tests.rs new file mode 100644 index 000000000..5711ad959 --- /dev/null +++ b/integration_tests/src/account_update_tests.rs @@ -0,0 +1,273 @@ +use std::sync::Arc; + +use borsh::BorshSerialize; +use entities::api_req_params::{GetAsset, Options}; +use function_name::named; + +use plerkle_serialization::{ + root_as_account_info, serializer::serialize_account, + solana_geyser_plugin_interface_shims::ReplicaAccountInfoV2, +}; +use serial_test::serial; +use solana_sdk::pubkey::Pubkey; +use solana_sdk::{program_option::COption, program_pack::Pack}; +use spl_token::state::{Account as TokenAccount, AccountState, Mint}; +use tokio::sync::Mutex; +use tokio::task::JoinSet; + +use super::common::*; + +#[derive(Debug, Clone)] +// TODO: Add amount +struct TokenAccountUpdate { + owner: Pubkey, + delegate: COption, + state: AccountState, +} + +#[derive(Debug, Clone)] +struct MintAccountUpdate { + supply: u64, +} + +#[derive(Debug, Clone)] +struct MetadataAccountUpdate { + primary_sale_happened: bool, + is_mutable: bool, +} + +#[derive(Debug, Clone)] +enum AccountUpdate { + TokenAccount(TokenAccountUpdate), + #[allow(dead_code)] + MintAccount(MintAccountUpdate), + MetadataAccount(MetadataAccountUpdate), + None, +} + +macro_rules! 
update_field { + ($field:expr, $value:expr) => { + assert_ne!($field, $value); + $field = $value; + }; +} + +async fn index_account_update(setup: &TestSetup, pubkey: Pubkey, update: AccountUpdate, slot: u64) { + let account_bytes = cached_fetch_account(setup, pubkey, None).await; + + let account_info = root_as_account_info(&account_bytes).unwrap(); + let account_data = account_info.data().unwrap().iter().collect::>(); + + let modified_account_data = match update { + AccountUpdate::TokenAccount(TokenAccountUpdate { + owner, + delegate, + state, + }) => { + let mut account = TokenAccount::unpack(&account_data).unwrap(); + + update_field!(account.owner, owner); + update_field!(account.delegate, delegate); + update_field!(account.state, state); + + let mut data = vec![0; TokenAccount::LEN]; + TokenAccount::pack(account, data.as_mut_slice()).unwrap(); + data + } + AccountUpdate::MintAccount(MintAccountUpdate { supply }) => { + let mut account = Mint::unpack(&account_data).unwrap(); + + update_field!(account.supply, supply); + + let mut data = vec![0; Mint::LEN]; + Mint::pack(account, data.as_mut_slice()).unwrap(); + data + } + AccountUpdate::MetadataAccount(MetadataAccountUpdate { + primary_sale_happened, + is_mutable, + }) => { + let mut account: mpl_token_metadata::accounts::Metadata = + mpl_token_metadata::accounts::Metadata::from_bytes(&account_data).unwrap(); + + update_field!(account.primary_sale_happened, primary_sale_happened); + update_field!(account.is_mutable, is_mutable); + + account.try_to_vec().unwrap() + } + AccountUpdate::None => account_data, + }; + + let fbb = flatbuffers::FlatBufferBuilder::new(); + + let account_info = ReplicaAccountInfoV2 { + pubkey: &account_info.pubkey().unwrap().0, + lamports: account_info.lamports(), + owner: account_info.owner().unwrap().0.as_ref(), + executable: account_info.executable(), + rent_epoch: account_info.rent_epoch(), + data: &modified_account_data, + write_version: 0, + txn_signature: None, + }; + let is_startup = 
false; + + let fbb = serialize_account(fbb, &account_info, slot, is_startup); + index_account_bytes(setup, fbb.finished_data().to_vec()).await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_account_updates() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + let mint = Pubkey::try_from("843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f").unwrap(); + + let nft_accounts = get_nft_accounts(&setup, mint).await; + + let request = GetAsset { + id: mint.to_string(), + options: Options::default(), + }; + + let random_pub_key = Pubkey::try_from("1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM").unwrap(); + let random_pub_key2 = Pubkey::try_from("1111111ogCyDbaRMvkdsHB3qfdyFYaG1WtRUAfdh").unwrap(); + + #[derive(Clone)] + struct NamedUpdate { + name: String, + account: Pubkey, + update: AccountUpdate, + } + + let token_updated = NamedUpdate { + name: "token".to_string(), + account: nft_accounts.token, + update: AccountUpdate::TokenAccount(TokenAccountUpdate { + owner: random_pub_key, + delegate: COption::Some(random_pub_key2), + state: AccountState::Initialized, + }), + }; + let mint_updated = NamedUpdate { + name: "mint".to_string(), + account: nft_accounts.mint, + // You can't easily change an NFT's mint account. The supply is fixed to 1 unless you burn + // the token account, the decimals are fixed at 0 and the freeze authority is not displayed + // in the API. 
+ update: AccountUpdate::None, + }; + let metadata_updated = NamedUpdate { + name: "metadata".to_string(), + account: nft_accounts.metadata, + update: AccountUpdate::MetadataAccount(MetadataAccountUpdate { + primary_sale_happened: true, + is_mutable: false, + }), + }; + let named_updates = vec![token_updated, mint_updated, metadata_updated]; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + // Test that stale updates are rejected and new updates are accepted + for named_update in named_updates.clone() { + if let AccountUpdate::None = named_update.update { + continue; + } + + setup.clean_up_data_bases().await; + + index_nft(&setup, mint).await; + + let response = setup + .das_api + .get_asset(request.clone(), mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name.clone(), response); + + index_account_update( + &setup, + named_update.account, + named_update.update.clone(), + DEFAULT_SLOT - 1, + ) + .await; + + let response_stale_lot = setup + .das_api + .get_asset(request.clone(), mutexed_tasks.clone()) + .await + .unwrap(); + assert_eq!( + response, response_stale_lot, + "Update for {} account was not rejected", + named_update.name + ); + + index_account_update( + &setup, + named_update.account, + named_update.update.clone(), + DEFAULT_SLOT + 1, + ) + .await; + + let response_new_slot = setup + .das_api + .get_asset(request.clone(), mutexed_tasks.clone()) + .await + .unwrap(); + + assert_ne!(response, response_new_slot); + insta::assert_json_snapshot!( + format!("{}-{}-updated", name, named_update.name), + response_new_slot + ); + } + + // Test that the different metadata/mint/token updates use different slots and don't interfere + // with each other + for named_update in named_updates.clone() { + setup.clean_up_data_bases().await; + + index_nft(&setup, mint).await; + + let other_named_updates = named_updates + .clone() + .into_iter() + .filter(|u| u.name != named_update.name) + .collect::>(); + + let ordered_name_updates = 
other_named_updates + .into_iter() + .chain(vec![named_update]) + .collect::>(); + + for (i, named_update) in ordered_name_updates.into_iter().enumerate() { + index_account_update( + &setup, + named_update.account, + named_update.update.clone(), + DEFAULT_SLOT + named_updates.len() as u64 - i as u64, + ) + .await; + } + insta::assert_json_snapshot!( + format!("{}-with-all-updates", name), + setup + .das_api + .get_asset(request.clone(), mutexed_tasks.clone()) + .await + .unwrap() + ); + } +} diff --git a/integration_tests/src/cnft_tests.rs b/integration_tests/src/cnft_tests.rs new file mode 100644 index 000000000..861e7e771 --- /dev/null +++ b/integration_tests/src/cnft_tests.rs @@ -0,0 +1,594 @@ +use entities::api_req_params::{GetAsset, Options, SearchAssets}; +use function_name::named; +use std::{str::FromStr, sync::Arc}; +use tokio::{sync::Mutex, task::JoinSet}; + +use itertools::Itertools; + +use serial_test::serial; + +use solana_sdk::signature::Signature; + +use super::common::*; + +// TODO: Adjust this so that it can be run from anywhere. +// Do not move this test name or tests will break because the snapshot name and location will change. 
+pub async fn run_get_asset_scenario_test( + setup: &TestSetup, + asset_id: &str, + seeds: Vec, + order: Order, + options: Options, +) { + let seed_permutations: Vec> = match order { + Order::AllPermutations => seeds.iter().permutations(seeds.len()).collect::>(), + Order::Forward => vec![seeds.iter().collect_vec()], + }; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + for events in seed_permutations { + index_seed_events(setup, events).await; + let request = GetAsset { + id: asset_id.to_string(), + options: options.clone(), + }; + + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(setup.name.clone(), response); + } +} + +#[tokio::test] +#[serial] +#[named] +async fn test_asset_decompress() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + let asset_id = "Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8"; + + // Mint a compressed NFT and then decompress it. In production, we would receive account updates for the newly minted NFT. + // This test guarentees consistent results if we index the events in different orders. 
+ let seeds: Vec = vec![ + // mint cNFT + seed_txn("55tQCoLUtHyu4i6Dny6SMdq4dVD61nuuLxXvRLeeQqE6xdm66Ajm4so39MXcJ2VaTmCNDEFBpitzLkiFaF7rNtHi"), + // redeem + seed_txn("4FQRV38NSP6gDo8qDbTBfy8UDHUd6Lzu4GXbHtfvWbtCArkVcbGQwinZ7M61eCmPEF5L8xu4tLAXL7ozbh5scfRi"), + // decompress + seed_txn("3Ct9n9hv5PWEYbsrgDdUDqegzsnX2n5jYRxkq5YafFAueup8mTYmN4nHhNCaEwVyVAVqNssr4fizdg9wRavT7ydE"), + // regular nft mint + seed_nft("Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8"), + ]; + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_cnft_scenario_mint_update_metadata() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + // Mint a compressed NFT and then update its metadata. Verify correct state regardless of order. + let asset_id = "FLFoCw2RBbxiw9rbEeqPWJ5rasArD9kTCKWEJirTexsU"; + let seeds: Vec = vec![ + // mint cNFT + seed_txn("2DP84v6Pi3e4v5i7KSvzmK4Ufbzof3TAiEqDbm9gg8jZpBRF9f1Cy6x54kvZoHPX9k1XfqbsG1FTv2KVP9fvNrN6"), + // update metadata + seed_txn("3bsL5zmLKvhN9Je4snTKxjFSpmXEEg2cvMHm2rCNgaEYkNXBqJTA4N7QmvBSWPiNUQPtzJSYzpQYX92NowV3L7vN"), + ]; + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_cnft_scenario_mint_update_metadata_remove_creators() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + // Mint a compressed NFT and then update its metadata to remove creators. + // Creator removal inserts a placeholder creator to handle out-of-order updates. + // This test explicitly verifies this behaviour. 
+ let asset_id = "Gi4fAXJdnWYrEPjQm3wnW9ctgG7zJjB67zHDQtRGRWyZ"; + let seeds: Vec = vec![ + // mint cNFT + seed_txn("2qMQrXfRE7pdnjwobWeqDkEhsv6MYmv3JdgvNxTVaL1VrMCZ4JYkUnu7jiJb2etX3W9WyQgSxktUgn9skxCeqTo5"), + // update metadata (no creators) + seed_txn("41YW187sn6Z2dXfqz6zSbnPtQoE826cCSgTLnMLKa9rH1xrCqAXBQNwKnzjGc9wjU5RtMCqKhy2eMN2TjuYC8veB"), + ]; + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_cnft_owners_table() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + + let transactions = vec![ + "25djDqCTka7wEnNMRVwsqSsVHqQMknPReTUCmvF4XGD9bUD494jZ1FsPaPjbAK45TxpdVuF2RwVCK9Jq7oxZAtMB", + "3UrxyfoJKH2jvVkzZZuCMXxtyaFUtgjhoQmirwhyiFjZXA8oM3QCBixCSBj9b53t5scvsm3qpuq5Qm4cGbNuwQP7", + "4fzBjTaXmrrJReLLSYPzn1fhPfuiU2EU1hGUddtHV1B49pvRewGyyzvMMpssi7K4Y5ZYj5xS9DrJuxqJDZRMZqY1", + ]; + for txn in transactions { + index_transaction(&setup, Signature::from_str(txn).unwrap()).await; + } + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + for (request, individual_test_name) in [ + ( + SearchAssets { + owner_address: Some("F3MdnVQkRSy56FSKroYawfMk1RJFo42Quzz8VTmFzPVz".to_string()), + page: Some(1), + limit: Some(5), + ..SearchAssets::default() + }, + "base", + ), + ( + SearchAssets { + owner_address: Some("3jnP4utL1VvjNhkxstYJ5MNayZfK4qHjFBDHNKEBpXCH".to_string()), + page: Some(1), + limit: Some(5), + ..SearchAssets::default() + }, + "with_different_owner", + ), + ] { + let response = setup + .das_api + .search_assets(request.clone(), mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(format!("{}-{}", name, individual_test_name), response); + } +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_no_json_uri() { + let name = trim_test_name(function_name!()); + let 
setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + let seeds = vec![seed_txn( + "4ASu45ELoTmvwhNqokGQrh2VH8p5zeUepYLbkcULMeXSCZJGrJa7ojgdVh5JUxBjAMF9Lrp55EgUUFPaPeWKejNQ", + )]; + run_get_asset_scenario_test( + &setup, + "DFRJ4PwAze1mMQccRmdyc46yQpEVd4FPiwtAVgzGCs7g", + seeds, + Order::Forward, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_delegate_transfer() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let asset_id = "77wWrvhgEkkQZQVA2hoka1JTsjG3w7BVzvcmqxDrVPWE"; + + let seeds: Vec = seed_txns([ + "KNWsAYPo3mm1HuFxRyEwBBMUZ2hqTnFXjoPVFo7WxGTfmfRwz6K8eERc4dnJpHyuoDkAZu1czK55iB1SbtCsdW2", + "3B1sASkuToCWuGFRG47axQDm1SpgLi8qDDGnRFeR7LB6oa5C3ZmkEuX98373gdMTBXED44FkwT227kBBAGSw7e8M", + "5Q8TAMMkMTHEM2BHyD2fp2sVdYKByFeATzM2mHF6Xbbar33WaeuygPKGYCWiDEt3MZU1mUrq1ePnT9o4Pa318p8w", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_redeem_cancel_redeem() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let asset_id = "5WaPA7HLZKGg56bcKiroMXAzHmB1mdxK3QTeCDepLkiK"; + + let seeds: Vec = seed_txns([ + "3uzWoVgLGVd9cGXaF3JW7znpWgKse3obCa2Vvdoe59kaziX84mEXTwecUoZ49PkJDjReRMSXksKzyfj7pf3ekAGR", + "49bJ8U3cK9htmLvA1mhXXcjKdpV2YN5JQBrb3Quh7wxENz1BP9F8fE9CKsje41aMbZwzgomnkXirKx2Xpdvprtak", + "32FpSe6r9jnFNjjvbx2PPQdZqs5KpMoF6yawiRW1F6ctu1kmx2B4sLDBGjsthVQtmnhaJVrqdtmUP893FwXCbqY5", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + 
.await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_redeem() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + + let asset_id = "Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8"; + + let seeds: Vec = seed_txns([ + "55tQCoLUtHyu4i6Dny6SMdq4dVD61nuuLxXvRLeeQqE6xdm66Ajm4so39MXcJ2VaTmCNDEFBpitzLkiFaF7rNtHi", + "4FQRV38NSP6gDo8qDbTBfy8UDHUd6Lzu4GXbHtfvWbtCArkVcbGQwinZ7M61eCmPEF5L8xu4tLAXL7ozbh5scfRi", + // Purpose of this test is to check flow mint, redeem. But this last transaction is decompress. + // Doesn't make sense to execute it and also such as Aura node implementation is deleting data from asset leaf + // column family API response is different from expected, after parsing this last tx. + // For comparison reference implementation doesn't drop asset leaf data. + // Leave this signature hash here for future in case we need it. + // "3Ct9n9hv5PWEYbsrgDdUDqegzsnX2n5jYRxkq5YafFAueup8mTYmN4nHhNCaEwVyVAVqNssr4fizdg9wRavT7ydE", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_transfer_burn() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let asset_id = "8vw7tdLGE3FBjaetsJrZAarwsbc8UESsegiLyvWXxs5A"; + + let seeds: Vec = seed_txns([ + "5coWPFty37s7haT3SVyMf6PkTaABEnhCRhfDjXeMNS58czHB5dCFPY6VrsZNwxBnqypmNic1LbLp1j5qjbdnZAc8", + "k6jmJcurgBQ6F2bVa86Z1vGb7ievzxwRZ8GAqzFEG8HicDizxceYPUm1KTzWZ3QKtGgy1EuFWUGCRqBeKU9SAoJ", + "KHNhLijkAMeKeKm6kpbk3go6q9uMF3zmfCoYSBgERe8qJDW8q5ANpnkyBuyVkychXCeWzRY8i5EtKfeGaDDU23w", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) 
+ .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_transfer_noop() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + + let asset_id = "7myVr8fEG52mZ3jAwgz88iQRWsuzuVR2nfH8n2AXnBxE"; + + let seeds: Vec = seed_txns([ + "4nKDSvw2kGpccZWLEPnfdP7J1SEexQFRP3xWc9NBtQ1qQeGu3bu5WnAdpcLbjQ4iyX6BQ5QGF69wevE8ZeeY5poA", + "4URwUGBjbsF7UBUYdSC546tnBy7nD67txsso8D9CR9kGLtbbYh9NkGw15tEp16LLasmJX5VQR4Seh8gDjTrtdpoC", + "5bNyZfmxLVP9cKc6GjvozExrSt4F1QFt4PP992pQwT8FFHdWsX3ZFNvwurfU2xpDYtQ7qAUxVahGCraXMevRH8p1", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_transfer_transfer() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let asset_id = "EcLv3bbLYr2iH5PVEuf9pJMRdDCvCqwSx3Srz6AeKjAe"; + + let seeds: Vec = seed_txns([ + "5bq936UgGs4RnxM78iXp1PwVhr8sTYoEsHCWpr8QBFtc2YtS3ieYHcsPG46G2ikwrS3tXYnUK93PzseT52AR81RR", + "5VC3Jqr5X1N8NB8zuSahHpayekLVozYkDiPjJLqU6H5M6fq9ExVLGYYKKCPbeksMPXTjy65sdEQGPzDWAYPs8QjP", + "34xjcNf3rZFKz381hKpFLqxpojaDgXEpCqH5qcpTXLaJnDbtqRz35wiuMF1cAgvJGLzYYrwaMvCK1D7LxYsdpMU1", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_verify_creator() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let asset_id = "5rmTyghEuZhRTB77L3KqGMy6h5RpSNWNLj14avbxGNKB"; + + let seeds: Vec = seed_txns([ + 
"37ts5SqpNazPTp26VfC4oeuXpXezKYkD9oarczPNaE8TUGG8msifnTYTBJiBZNBeAUGrNw85EEfwnR1t9SieKTdq", + "4xrw5UwQSxxPzVxge6fbtmgLNsT2amaGrwpZFE95peRbnHGpxWtS2fF7whXW2xma4i2KDXdneztJZCAtgGZKTw11", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_verify_collection() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let asset_id = "2WjoMU1hBGXv8sKcxQDGnu1tgMduzdZEmEEGjh8MZYfC"; + + let seeds: Vec = seed_txns([ + "63xhs5bXcuMR3uMACXWkkFMm7BJ9Thknh7WNMPzV8HJBNwpyxJTr98NrLFHnTZDHdSUFD42VFQx8rjSaGynWbaRs", + "5ZKjPxm3WAZzuqqkCDjgKpm9b5XjB9cuvv68JvXxWThvJaJxcMJgpSbYs4gDA9dGJyeLzsgNtnS6oubANF1KbBmt", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_transfer_mpl_programs() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let asset_id = "ZzTjJVwo66cRyBB5zNWNhUWDdPB6TqzyXDcwjUnpSJC"; + + let seeds: Vec = seed_txns([ + "3iJ6XzhUXxGQYEEUnfkbZGdrkgS2o9vXUpsXALet3Co6sFQ2h7J21J4dTgSka8qoKiUFUzrXZFHfkqss1VFivnAG", + "4gV14HQBm8GCXjSTHEXjrhUNGmsBiyNdWY9hhCapH9cshmqbPKxn2kUU1XbajZ9j1Pxng95onzR6dx5bYqxQRh2a", + "T571TWE76frw6mWxYoHDrTdxYq7hJSyCtVEG4qmemPPtsc1CCKdknn9rTMAVcdeukLfwB1G97LZLH8eHLvuByoA", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options::default(), + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_to_collection_unverify_collection() { + let name = trim_test_name(function_name!()); + let setup = 
TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let asset_id = "2gEbvG3Cb6JRaGWAx5e85Bf5z4u37EURBeyPBqXDzZoY"; + + let seeds: Vec = seed_txns([ + "tzXASugk8578bmtA3JAFQLEfcVQp3Np3rU9fyFas2Svk8nyBHXJnf7PdqebGNsSTwx6CEWpDCP5oLoCDcmbP35B", + "7nK9a2DSDZ4Gh6DatmxGJmuLiDEswaY9bYSSPTtQppk7PtLKXYE84jWzm7AC4G1fpa831GaXuXcn5n5ybWqB4e5", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options { + show_unverified_collections: true, + show_collection_metadata: false, + show_inscription: false, + show_fungible: false, + }, + ) + .await; +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mint_verify_collection_unverify_collection() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let asset_id = "BiHHJ1gKV4exTjPe7PE6aydgMVqRUzzz8aeWYCGhZJ4s"; + + let seeds: Vec = seed_txns([ + "5uWXt8JAhuP2XQ2nYJTq8Ndp34fdG3vmJ7DJnb3bE6iyrZZ6jeuN9w5jZvKrduMDu4zKyQU7A3JtswhKxE3hjKBk", + "4hQQsDKgDx5PpZR7nGvxKsLSvX4J7voaiJC3ag7dPuu4HY5kbvaqD2gyeHbdja1f22ypmzouRNpuo6sbyGDSSgya", + "5k71fZRpRagY45ZYu13Q8C3Bmw6KFPBkRmbBx2NuYk7roVtvM8P16WouCZtnkhRCyKyQHSgHKyTY92t9aq2tyLdd", + ]); + + run_get_asset_scenario_test( + &setup, + asset_id, + seeds, + Order::AllPermutations, + Options { + show_unverified_collections: true, + show_collection_metadata: false, + show_inscription: false, + show_fungible: false, + }, + ) + .await; +} diff --git a/integration_tests/src/common.rs b/integration_tests/src/common.rs new file mode 100644 index 000000000..d2439c2cb --- /dev/null +++ b/integration_tests/src/common.rs @@ -0,0 +1,720 @@ +use std::{collections::HashMap, path::Path}; + +use std::str::FromStr; + +use entities::models::UnprocessedAccountMessage; +use metrics_utils::MetricState; +use 
mpl_token_metadata::accounts::Metadata; + +use nft_ingester::index_syncronizer::Synchronizer; +use nft_ingester::processors::transaction_based::bubblegum_updates_processor::BubblegumTxProcessor; +use nft_ingester::{ + api::{account_balance::AccountBalanceGetterImpl, DasApi}, + buffer::Buffer, + config::JsonMiddlewareConfig, + json_worker::JsonWorker, + message_parser::MessageParser, + processors::accounts_processor::AccountsProcessor, + raydium_price_fetcher::{self, RaydiumTokenPriceFetcher}, +}; +use plerkle_serialization::{ + serializer::{seralize_encoded_transaction_with_status, serialize_account}, + solana_geyser_plugin_interface_shims::ReplicaAccountInfoV2, +}; + +use postgre_client::PgClient; +use rocks_db::{batch_savers::BatchSaveStorage, migrator::MigrationState, Storage}; +use solana_client::nonblocking::rpc_client::RpcClient; +use solana_sdk::pubkey::Pubkey; +use solana_sdk::signature::Signature; +use std::sync::Arc; +use tokio::{ + sync::{broadcast, Mutex}, + task::JoinSet, + time::Instant, +}; +use usecase::proofs::MaybeProofChecker; + +use serde::de::DeserializeOwned; +use solana_account_decoder::{UiAccount, UiAccountEncoding}; +use solana_client::{ + client_error::ClientError, + client_error::Result as RpcClientResult, + rpc_config::{RpcAccountInfoConfig, RpcTransactionConfig}, + rpc_request::RpcRequest, + rpc_response::{Response as RpcResponse, RpcTokenAccountBalance}, +}; +use solana_sdk::{ + account::Account, + commitment_config::{CommitmentConfig, CommitmentLevel}, +}; +use solana_transaction_status::{EncodedConfirmedTransactionWithStatusMeta, UiTransactionEncoding}; +use std::{fmt, time::Duration}; +use tracing::{error, info}; + +use std::path::PathBuf; + +use tokio::time::sleep; + +use nft_ingester::init::init_index_storage_with_migration; + +pub const DEFAULT_SLOT: u64 = 1; + +const ACC_PROCESSOR_FETCH_BATCH_SIZE: usize = 1; + +const MAX_PG_CONNECTIONS: u32 = 5; +const MIN_PG_CONNECTIONS: u32 = 5; + +const API_MAX_PAGE_LIMIT: usize = 100; + 
+const DUMP_SYNCHRONIZER_BATCH_SIZE: usize = 1000; +const SYNCHRONIZER_PARALLEL_TASKS: usize = 1; + +const POSTGRE_MIGRATIONS_PATH: &str = "../migrations"; + +pub struct TestSetup { + pub name: String, + pub client: Arc, + pub db: Arc, + pub rocks_db: Arc, + pub metrics: MetricState, + pub message_parser: MessageParser, + pub acc_processor: Arc>, + pub tx_processor: BubblegumTxProcessor, + pub synchronizer: Synchronizer>, + pub das_api: DasApi< + MaybeProofChecker, + JsonWorker, + JsonWorker, + AccountBalanceGetterImpl, + RaydiumTokenPriceFetcher, + Storage, + >, +} + +impl TestSetup { + pub async fn new(name: String) -> Self { + Self::new_with_options(name, TestSetupOptions::default()).await + } + + pub async fn new_with_options(name: String, opts: TestSetupOptions) -> Self { + let red_metrics = Arc::new(metrics_utils::red::RequestErrorDurationMetrics::new()); + + let db_url = std::env::var("DATABASE_TEST_URL").unwrap(); + + let index_storage = Arc::new( + init_index_storage_with_migration( + db_url.as_ref(), + MAX_PG_CONNECTIONS, + red_metrics.clone(), + MIN_PG_CONNECTIONS, + POSTGRE_MIGRATIONS_PATH, + ) + .await + .unwrap(), + ); + + let rpc_url = match opts.network.unwrap_or_default() { + Network::Mainnet => std::env::var("MAINNET_RPC_URL").unwrap(), + Network::Devnet => std::env::var("DEVNET_RPC_URL").unwrap(), + }; + let client = Arc::new(RpcClient::new(rpc_url.to_string())); + + let (_shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); + + let buffer = Arc::new(Buffer::new()); + + let metrics_state = MetricState::new(); + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let acc_processor = AccountsProcessor::build( + shutdown_rx.resubscribe(), + ACC_PROCESSOR_FETCH_BATCH_SIZE, + buffer.clone(), + metrics_state.ingester_metrics.clone(), + None, + index_storage.clone(), + client.clone(), + mutexed_tasks.clone(), + ) + .await + .unwrap(); + + let rocks_db_dir = tempfile::TempDir::new().unwrap(); + + if opts.clear_db { + 
index_storage.clean_db().await.unwrap(); + } + + let storage = Arc::new( + Storage::open( + rocks_db_dir.path(), + mutexed_tasks.clone(), + red_metrics.clone(), + MigrationState::Last, + ) + .unwrap(), + ); + + let tx_processor = + BubblegumTxProcessor::new(storage.clone(), metrics_state.ingester_metrics.clone()); + + let das_api = DasApi::new( + index_storage.clone(), + storage.clone(), + metrics_state.api_metrics.clone(), + None, + None, + API_MAX_PAGE_LIMIT, + None, + None, + JsonMiddlewareConfig::default(), + Arc::new(AccountBalanceGetterImpl::new(client.clone())), + None, + Arc::new(RaydiumTokenPriceFetcher::new( + "".to_string(), // API url, is not used in tests + raydium_price_fetcher::CACHE_TTL, + None, + )), + "11111111111111111111111111111111".to_string(), + ); + + let message_parser = MessageParser::new(); + + let synchronizer = Synchronizer::new( + storage.clone(), + index_storage.clone(), + index_storage.clone(), + DUMP_SYNCHRONIZER_BATCH_SIZE, + "./dump".to_string(), + metrics_state.synchronizer_metrics.clone(), + SYNCHRONIZER_PARALLEL_TASKS, + false, + ); + + TestSetup { + name, + client, + db: index_storage, + rocks_db: storage, + metrics: metrics_state, + acc_processor: Arc::new(acc_processor), + tx_processor, + synchronizer, + message_parser, + das_api, + } + } + + pub async fn clean_up_data_bases(&self) { + self.db.clean_db().await.unwrap(); + self.rocks_db.clean_db().await; + } +} + +#[derive(Clone, Copy, Default)] +pub struct TestSetupOptions { + pub network: Option, + pub clear_db: bool, +} + +pub async fn get_transaction( + client: &RpcClient, + signature: Signature, + max_retries: u8, +) -> Result { + let mut retries = 0; + let mut delay = Duration::from_millis(500); + + const CONFIG: RpcTransactionConfig = RpcTransactionConfig { + encoding: Some(UiTransactionEncoding::Base64), + commitment: Some(CommitmentConfig { + commitment: CommitmentLevel::Confirmed, + }), + max_supported_transaction_version: Some(0), + }; + + loop { + let response = 
client + .send( + RpcRequest::GetTransaction, + serde_json::json!([signature.to_string(), CONFIG,]), + ) + .await; + + if let Err(error) = response { + if retries < max_retries { + error!("failed to get transaction {:?}: {:?}", signature, error); + sleep(delay).await; + delay *= 2; + retries += 1; + continue; + } else { + return Err(error); + } + } + return response; + } +} + +pub async fn fetch_and_serialize_transaction( + client: &RpcClient, + sig: Signature, +) -> anyhow::Result>> { + let max_retries = 5; + let tx: EncodedConfirmedTransactionWithStatusMeta = + get_transaction(client, sig, max_retries).await?; + + // Ignore if tx failed or meta is missed + let meta = tx.transaction.meta.as_ref(); + if meta.map(|meta| meta.status.is_err()).unwrap_or(true) { + info!("Ignoring failed transaction: {}", sig); + return Ok(None); + } + let fbb = flatbuffers::FlatBufferBuilder::new(); + let fbb = seralize_encoded_transaction_with_status(fbb, tx)?; + let serialized = fbb.finished_data(); + + Ok(Some(serialized.to_vec())) +} + +// Util functions for accounts +pub async fn rpc_tx_with_retries( + client: &RpcClient, + request: RpcRequest, + value: serde_json::Value, + max_retries: u8, + error_key: E, +) -> RpcClientResult +where + T: DeserializeOwned, + E: fmt::Debug, +{ + let mut retries = 0; + let mut delay = Duration::from_millis(500); + loop { + match client.send(request, value.clone()).await { + Ok(value) => return Ok(value), + Err(error) => { + if retries < max_retries { + error!("retrying {request} {error_key:?}: {error}"); + sleep(delay).await; + delay *= 2; + retries += 1; + } else { + return Err(error); + } + } + } + } +} + +pub async fn fetch_account( + pubkey: Pubkey, + client: &RpcClient, + max_retries: u8, +) -> anyhow::Result<(Account, u64)> { + const CONFIG: RpcAccountInfoConfig = RpcAccountInfoConfig { + encoding: Some(UiAccountEncoding::Base64Zstd), + commitment: Some(CommitmentConfig { + commitment: CommitmentLevel::Finalized, + }), + data_slice: None, + 
min_context_slot: None, + }; + + let response: RpcResponse> = rpc_tx_with_retries( + client, + RpcRequest::GetAccountInfo, + serde_json::json!([pubkey.to_string(), CONFIG]), + max_retries, + pubkey, + ) + .await?; + + let account: Account = response + .value + .ok_or_else(|| anyhow::anyhow!("failed to get account {pubkey}"))? + .decode() + .ok_or_else(|| anyhow::anyhow!("failed to parse account {pubkey}"))?; + + Ok((account, response.context.slot)) +} + +pub async fn fetch_and_serialize_account( + client: &RpcClient, + pubkey: Pubkey, + slot: Option, +) -> anyhow::Result> { + let max_retries = 5; + + let fetch_result = fetch_account(pubkey, client, max_retries).await; + + let (account, actual_slot) = match fetch_result { + Ok((account, actual_slot)) => (account, actual_slot), + Err(e) => { + return Err(anyhow::anyhow!("Failed to fetch account: {:?}", e)); + } + }; + + let fbb = flatbuffers::FlatBufferBuilder::new(); + let account_info = ReplicaAccountInfoV2 { + pubkey: &pubkey.to_bytes(), + lamports: account.lamports, + owner: &account.owner.to_bytes(), + executable: account.executable, + rent_epoch: account.rent_epoch, + data: &account.data, + write_version: 0, + txn_signature: None, + }; + let is_startup = false; + + let fbb = serialize_account( + fbb, + &account_info, + match slot { + Some(slot) => slot, + None => actual_slot, + }, + is_startup, + ); + Ok(fbb.finished_data().to_vec()) +} + +pub async fn get_token_largest_account(client: &RpcClient, mint: Pubkey) -> anyhow::Result { + let response: RpcResponse> = rpc_tx_with_retries( + client, + RpcRequest::Custom { + method: "getTokenLargestAccounts", + }, + serde_json::json!([mint.to_string(),]), + 5, + mint, + ) + .await?; + + match response.value.first() { + Some(account) => { + let pubkey = Pubkey::from_str(&account.address); + match pubkey { + Ok(pubkey) => Ok(pubkey), + Err(e) => anyhow::bail!("failed to parse pubkey: {:?}", e), + } + } + None => anyhow::bail!("no accounts for mint {mint}: burned nft?"), + 
} +} + +pub async fn index_account_bytes(setup: &TestSetup, account_bytes: Vec) { + let parsed_acc = setup + .message_parser + .parse_account(account_bytes, false) + .unwrap(); + let ready_to_process = parsed_acc + .into_iter() + .map(|acc| UnprocessedAccountMessage { + account: acc.unprocessed_account, + key: acc.pubkey, + id: String::new(), // Redis message id + }) + .collect(); + + let mut batch_storage = BatchSaveStorage::new( + setup.rocks_db.clone(), + 1, + setup.metrics.ingester_metrics.clone(), + ); + + let mut interval = tokio::time::interval(Duration::from_millis(1)); + let mut batch_fill_instant = Instant::now(); + let mut core_fees = HashMap::new(); + setup + .acc_processor + .process_account( + &mut batch_storage, + ready_to_process, + &mut core_fees, + &mut vec![], + &mut interval, + &mut batch_fill_instant, + ) + .await; + + let _ = batch_storage.flush(); + + let (_shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); + setup + .synchronizer + .synchronize_nft_asset_indexes(&shutdown_rx, 1000) + .await + .unwrap(); + + setup + .synchronizer + .synchronize_fungible_asset_indexes(&shutdown_rx, 1000) + .await + .unwrap(); +} + +pub async fn cached_fetch_account( + setup: &TestSetup, + account: Pubkey, + slot: Option, +) -> Vec { + cached_fetch_account_with_error_handling(setup, account, slot) + .await + .unwrap() +} + +fn get_relative_project_path(path: &str) -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(path) +} + +async fn cached_fetch_account_with_error_handling( + setup: &TestSetup, + account: Pubkey, + slot: Option, +) -> anyhow::Result> { + let dir = get_relative_project_path(&format!("src/data/accounts/{}", setup.name)); + + if !Path::new(&dir).exists() { + std::fs::create_dir(&dir).unwrap(); + } + let file_path = dir.join(format!("{}", account)); + + if file_path.exists() { + Ok(std::fs::read(file_path).unwrap()) + } else { + let account_bytes = fetch_and_serialize_account(&setup.client, account, slot).await?; + 
std::fs::write(file_path, &account_bytes).unwrap(); + Ok(account_bytes) + } +} + +async fn cached_fetch_transaction(setup: &TestSetup, sig: Signature) -> Vec { + let dir = get_relative_project_path(&format!("src/data/transactions/{}", setup.name)); + + if !Path::new(&dir).exists() { + std::fs::create_dir(&dir).unwrap(); + } + let file_path = dir.join(format!("{}", sig)); + + if file_path.exists() { + std::fs::read(file_path).unwrap() + } else { + let txn_bytes = fetch_and_serialize_transaction(&setup.client, sig) + .await + .unwrap() + .unwrap(); + std::fs::write(file_path, &txn_bytes).unwrap(); + txn_bytes + } +} + +pub async fn index_transaction(setup: &TestSetup, sig: Signature) { + let txn_bytes: Vec = cached_fetch_transaction(setup, sig).await; + + let ready_to_process_tx = setup + .message_parser + .parse_transaction(txn_bytes, false) + .unwrap(); + + setup + .tx_processor + .process_transaction(ready_to_process_tx) + .await + .unwrap(); + + let (_shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); + setup + .synchronizer + .synchronize_nft_asset_indexes(&shutdown_rx, 1000) + .await + .unwrap(); + + setup + .synchronizer + .synchronize_fungible_asset_indexes(&shutdown_rx, 1000) + .await + .unwrap(); +} + +async fn cached_fetch_largest_token_account_id(client: &RpcClient, mint: Pubkey) -> Pubkey { + let dir = get_relative_project_path(&format!("src/data/largest_token_account_ids/{}", mint)); + + if !Path::new(&dir).exists() { + std::fs::create_dir(&dir).unwrap(); + } + let file_path = dir.join(format!("{}", mint)); + + if file_path.exists() { + Pubkey::try_from(std::fs::read(file_path).unwrap()).unwrap() + } else { + let token_account = get_token_largest_account(client, mint).await.unwrap(); + std::fs::write(file_path, token_account.to_bytes()).unwrap(); + token_account + } +} + +#[allow(unused)] +#[derive(Clone, Copy, Debug)] +pub enum SeedEvent { + Account(Pubkey), + Nft(Pubkey), + TokenMint(Pubkey), + Signature(Signature), +} + +#[derive(Clone, Copy, 
Debug, Default)] +pub enum Network { + #[default] + Mainnet, + Devnet, +} + +#[derive(Clone, Copy, Debug)] +pub enum Order { + Forward, + AllPermutations, +} + +pub async fn index_seed_events(setup: &TestSetup, events: Vec<&SeedEvent>) { + for event in events { + match event { + SeedEvent::Account(account) => { + index_account_with_ordered_slot(setup, *account).await; + } + SeedEvent::Nft(mint) => { + index_nft(setup, *mint).await; + } + SeedEvent::Signature(sig) => { + index_transaction(setup, *sig).await; + } + SeedEvent::TokenMint(mint) => { + index_token_mint(setup, *mint).await; + } + } + } +} + +#[allow(unused)] +pub fn seed_account(str: &str) -> SeedEvent { + SeedEvent::Account(Pubkey::from_str(str).unwrap()) +} + +pub fn seed_nft(str: &str) -> SeedEvent { + SeedEvent::Nft(Pubkey::from_str(str).unwrap()) +} + +#[allow(unused)] +pub fn seed_token_mint(str: &str) -> SeedEvent { + SeedEvent::TokenMint(Pubkey::from_str(str).unwrap()) +} + +pub fn seed_txn(str: &str) -> SeedEvent { + SeedEvent::Signature(Signature::from_str(str).unwrap()) +} + +pub fn seed_txns(strs: I) -> Vec +where + I: IntoIterator, + I::Item: AsRef, +{ + strs.into_iter().map(|s| seed_txn(s.as_ref())).collect() +} + +#[allow(unused)] +pub fn seed_accounts(strs: I) -> Vec +where + I: IntoIterator, + I::Item: AsRef, +{ + strs.into_iter().map(|s| seed_account(s.as_ref())).collect() +} + +pub fn seed_nfts(strs: I) -> Vec +where + I: IntoIterator, + I::Item: AsRef, +{ + strs.into_iter().map(|s| seed_nft(s.as_ref())).collect() +} + +#[allow(unused)] +pub fn seed_token_mints(strs: I) -> Vec +where + I: IntoIterator, + I::Item: AsRef, +{ + strs.into_iter() + .map(|s| seed_token_mint(s.as_ref())) + .collect() +} + +pub async fn index_account(setup: &TestSetup, account: Pubkey) { + // If we used different slots for accounts, then it becomes harder to test updates of related + // accounts because we need to factor the fact that some updates can be disregarded because + // they are "stale". 
+ let slot = Some(DEFAULT_SLOT); + let account_bytes = cached_fetch_account(setup, account, slot).await; + index_account_bytes(setup, account_bytes).await; +} + +#[derive(Clone, Copy)] +pub struct NftAccounts { + pub mint: Pubkey, + pub metadata: Pubkey, + pub token: Pubkey, +} + +pub async fn get_nft_accounts(setup: &TestSetup, mint: Pubkey) -> NftAccounts { + let metadata_account = Metadata::find_pda(&mint).0; + let token_account = cached_fetch_largest_token_account_id(&setup.client, mint).await; + NftAccounts { + mint, + metadata: metadata_account, + token: token_account, + } +} + +async fn index_account_with_ordered_slot(setup: &TestSetup, account: Pubkey) { + let slot = None; + let account_bytes = cached_fetch_account(setup, account, slot).await; + index_account_bytes(setup, account_bytes).await; +} + +async fn index_token_mint(setup: &TestSetup, mint: Pubkey) { + let token_account = cached_fetch_largest_token_account_id(&setup.client, mint).await; + index_account(setup, mint).await; + index_account(setup, token_account).await; + + // If we used different slots for accounts, then it becomes harder to test updates of related + // accounts because we need to factor the fact that some updates can be disregarded because + // they are "stale". + let slot = Some(1); + let metadata_account = Metadata::find_pda(&mint).0; + match cached_fetch_account_with_error_handling(setup, metadata_account, slot).await { + Ok(account_bytes) => { + index_account_bytes(setup, account_bytes).await; + } + Err(_) => { + // If we can't find the metadata account, then we assume that the mint is not an NFT. 
+ } + } +} + +pub async fn index_nft(setup: &TestSetup, mint: Pubkey) { + index_nft_accounts(setup, get_nft_accounts(setup, mint).await).await; +} + +pub async fn index_nft_accounts(setup: &TestSetup, nft_accounts: NftAccounts) { + for account in [nft_accounts.mint, nft_accounts.metadata, nft_accounts.token] { + index_account(setup, account).await; + } +} + +pub fn trim_test_name(name: &str) -> String { + name.replace("test_", "") +} diff --git a/integration_tests/src/data/accounts/account_updates/6cQEeDKhobxxsvPKcAsdHZbDrPgQtEpo8PFzayDu5Gi5 b/integration_tests/src/data/accounts/account_updates/6cQEeDKhobxxsvPKcAsdHZbDrPgQtEpo8PFzayDu5Gi5 new file mode 100644 index 000000000..cc666a0d0 Binary files /dev/null and b/integration_tests/src/data/accounts/account_updates/6cQEeDKhobxxsvPKcAsdHZbDrPgQtEpo8PFzayDu5Gi5 differ diff --git a/integration_tests/src/data/accounts/account_updates/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f b/integration_tests/src/data/accounts/account_updates/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f new file mode 100644 index 000000000..71361ffeb Binary files /dev/null and b/integration_tests/src/data/accounts/account_updates/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f differ diff --git a/integration_tests/src/data/accounts/account_updates/DPBEGb2DajfkannVZ6J4NxRY2ovz1yn1j36i1WVdZVpW b/integration_tests/src/data/accounts/account_updates/DPBEGb2DajfkannVZ6J4NxRY2ovz1yn1j36i1WVdZVpW new file mode 100644 index 000000000..f9826c63c Binary files /dev/null and b/integration_tests/src/data/accounts/account_updates/DPBEGb2DajfkannVZ6J4NxRY2ovz1yn1j36i1WVdZVpW differ diff --git a/integration_tests/src/data/accounts/asset_decompress/5AgDPkMLtW7dxK4vo4Sbhgj2ek2Xxqe6k9HMSfwz7JrE b/integration_tests/src/data/accounts/asset_decompress/5AgDPkMLtW7dxK4vo4Sbhgj2ek2Xxqe6k9HMSfwz7JrE new file mode 100644 index 000000000..9c35b7ecf Binary files /dev/null and b/integration_tests/src/data/accounts/asset_decompress/5AgDPkMLtW7dxK4vo4Sbhgj2ek2Xxqe6k9HMSfwz7JrE 
differ diff --git a/integration_tests/src/data/accounts/asset_decompress/Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8 b/integration_tests/src/data/accounts/asset_decompress/Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8 new file mode 100644 index 000000000..8d56745df Binary files /dev/null and b/integration_tests/src/data/accounts/asset_decompress/Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8 differ diff --git a/integration_tests/src/data/accounts/asset_decompress/B5PfwkTK4WQtm5wqmb8ViTXiw9bvQgaJwVSbtssbmRvJ b/integration_tests/src/data/accounts/asset_decompress/B5PfwkTK4WQtm5wqmb8ViTXiw9bvQgaJwVSbtssbmRvJ new file mode 100644 index 000000000..78999c3de Binary files /dev/null and b/integration_tests/src/data/accounts/asset_decompress/B5PfwkTK4WQtm5wqmb8ViTXiw9bvQgaJwVSbtssbmRvJ differ diff --git a/integration_tests/src/data/accounts/asset_parsing/6cQEeDKhobxxsvPKcAsdHZbDrPgQtEpo8PFzayDu5Gi5 b/integration_tests/src/data/accounts/asset_parsing/6cQEeDKhobxxsvPKcAsdHZbDrPgQtEpo8PFzayDu5Gi5 new file mode 100644 index 000000000..890016a0c Binary files /dev/null and b/integration_tests/src/data/accounts/asset_parsing/6cQEeDKhobxxsvPKcAsdHZbDrPgQtEpo8PFzayDu5Gi5 differ diff --git a/integration_tests/src/data/accounts/asset_parsing/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f b/integration_tests/src/data/accounts/asset_parsing/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f new file mode 100644 index 000000000..ce9ccbf8e Binary files /dev/null and b/integration_tests/src/data/accounts/asset_parsing/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f differ diff --git a/integration_tests/src/data/accounts/asset_parsing/DPBEGb2DajfkannVZ6J4NxRY2ovz1yn1j36i1WVdZVpW b/integration_tests/src/data/accounts/asset_parsing/DPBEGb2DajfkannVZ6J4NxRY2ovz1yn1j36i1WVdZVpW new file mode 100644 index 000000000..54ccbe8ae Binary files /dev/null and b/integration_tests/src/data/accounts/asset_parsing/DPBEGb2DajfkannVZ6J4NxRY2ovz1yn1j36i1WVdZVpW differ diff --git 
a/integration_tests/src/data/accounts/creators_reordering/3mM2EwVk23dZ1SfNQBvhLJhAwuMAJcm5XFg9f8wpHYTP b/integration_tests/src/data/accounts/creators_reordering/3mM2EwVk23dZ1SfNQBvhLJhAwuMAJcm5XFg9f8wpHYTP new file mode 100644 index 000000000..a5ff8b3ba Binary files /dev/null and b/integration_tests/src/data/accounts/creators_reordering/3mM2EwVk23dZ1SfNQBvhLJhAwuMAJcm5XFg9f8wpHYTP differ diff --git a/integration_tests/src/data/accounts/creators_reordering/ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk b/integration_tests/src/data/accounts/creators_reordering/ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk new file mode 100644 index 000000000..692a5978c Binary files /dev/null and b/integration_tests/src/data/accounts/creators_reordering/ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk differ diff --git a/integration_tests/src/data/accounts/creators_reordering/CejAywG35pUojBh9ghjDMcUphEBCW8CidFymDAkubzhq b/integration_tests/src/data/accounts/creators_reordering/CejAywG35pUojBh9ghjDMcUphEBCW8CidFymDAkubzhq new file mode 100644 index 000000000..719aaa97b Binary files /dev/null and b/integration_tests/src/data/accounts/creators_reordering/CejAywG35pUojBh9ghjDMcUphEBCW8CidFymDAkubzhq differ diff --git a/integration_tests/src/data/accounts/mpl_core_autograph_plugin/Hz4MSHgevYkpwF3cerDLuPJLQE3GZ5yDWu7vqmQGpRMU b/integration_tests/src/data/accounts/mpl_core_autograph_plugin/Hz4MSHgevYkpwF3cerDLuPJLQE3GZ5yDWu7vqmQGpRMU new file mode 100644 index 000000000..7560f1d6e Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_autograph_plugin/Hz4MSHgevYkpwF3cerDLuPJLQE3GZ5yDWu7vqmQGpRMU differ diff --git a/integration_tests/src/data/accounts/mpl_core_autograph_plugin_with_signature/4MCuZ5WNCgFnb7YJ2exj34qsLscmwd23WcoLBXBkaB7d b/integration_tests/src/data/accounts/mpl_core_autograph_plugin_with_signature/4MCuZ5WNCgFnb7YJ2exj34qsLscmwd23WcoLBXBkaB7d new file mode 100644 index 000000000..b1667b268 Binary files /dev/null and 
b/integration_tests/src/data/accounts/mpl_core_autograph_plugin_with_signature/4MCuZ5WNCgFnb7YJ2exj34qsLscmwd23WcoLBXBkaB7d differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset/x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr b/integration_tests/src/data/accounts/mpl_core_get_asset/x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr new file mode 100644 index 000000000..92e7400a5 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset/x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority/6tqX4RuPCoD9dVKEJ51jykwBwjKh6runcHJSuSHpDPJU b/integration_tests/src/data/accounts/mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority/6tqX4RuPCoD9dVKEJ51jykwBwjKh6runcHJSuSHpDPJU new file mode 100644 index 000000000..3e01e588f Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority/6tqX4RuPCoD9dVKEJ51jykwBwjKh6runcHJSuSHpDPJU differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority/39XrhcVGuyq4HwTarxMCwDEMFtPBY5Nctxrvpvpdpe3g b/integration_tests/src/data/accounts/mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority/39XrhcVGuyq4HwTarxMCwDEMFtPBY5Nctxrvpvpdpe3g new file mode 100644 index 000000000..834468498 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority/39XrhcVGuyq4HwTarxMCwDEMFtPBY5Nctxrvpvpdpe3g differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority/2pY3t29uxpBotbmKbCsQNjYfML5DBoBshDgB7hpHu3XA 
b/integration_tests/src/data/accounts/mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority/2pY3t29uxpBotbmKbCsQNjYfML5DBoBshDgB7hpHu3XA new file mode 100644 index 000000000..19a2a13e1 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority/2pY3t29uxpBotbmKbCsQNjYfML5DBoBshDgB7hpHu3XA differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_data_section_with_binary_data/BVjK8uvqUuH5YU6ThX6A7gznx2xi8BxshawbuFe1Y5Vr b/integration_tests/src/data/accounts/mpl_core_get_asset_with_data_section_with_binary_data/BVjK8uvqUuH5YU6ThX6A7gznx2xi8BxshawbuFe1Y5Vr new file mode 100644 index 000000000..d198b2b10 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_data_section_with_binary_data/BVjK8uvqUuH5YU6ThX6A7gznx2xi8BxshawbuFe1Y5Vr differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_data_section_with_json_data/9vqNxe6M6t7PYo1gXrY18hVgDvCpouHSZ6vdDEFbybeA b/integration_tests/src/data/accounts/mpl_core_get_asset_with_data_section_with_json_data/9vqNxe6M6t7PYo1gXrY18hVgDvCpouHSZ6vdDEFbybeA new file mode 100644 index 000000000..be3dd61c2 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_data_section_with_json_data/9vqNxe6M6t7PYo1gXrY18hVgDvCpouHSZ6vdDEFbybeA differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_data_section_with_msg_pack_data/EuXEcqHhF9jPxV9CKB5hjHC2TRo3xprdgk5vJTc9qRaY b/integration_tests/src/data/accounts/mpl_core_get_asset_with_data_section_with_msg_pack_data/EuXEcqHhF9jPxV9CKB5hjHC2TRo3xprdgk5vJTc9qRaY new file mode 100644 index 000000000..f2168012e Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_data_section_with_msg_pack_data/EuXEcqHhF9jPxV9CKB5hjHC2TRo3xprdgk5vJTc9qRaY differ diff --git 
a/integration_tests/src/data/accounts/mpl_core_get_asset_with_edition/AejY8LGKAbQsrGZS1qgN4uFu99dJD3f8Js9Yrt7K3tCc b/integration_tests/src/data/accounts/mpl_core_get_asset_with_edition/AejY8LGKAbQsrGZS1qgN4uFu99dJD3f8Js9Yrt7K3tCc new file mode 100644 index 000000000..9c8ce6f00 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_edition/AejY8LGKAbQsrGZS1qgN4uFu99dJD3f8Js9Yrt7K3tCc differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_multiple_internal_and_external_plugins/Aw7KSaeRECbjLW7BYTUtMwGkaiAGhxrQxdLnpLYRnmbB b/integration_tests/src/data/accounts/mpl_core_get_asset_with_multiple_internal_and_external_plugins/Aw7KSaeRECbjLW7BYTUtMwGkaiAGhxrQxdLnpLYRnmbB new file mode 100644 index 000000000..b70250560 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_multiple_internal_and_external_plugins/Aw7KSaeRECbjLW7BYTUtMwGkaiAGhxrQxdLnpLYRnmbB differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config/9v2H5sDBXKmYkGHebfaWwdgBWuMTBVWQom3QeEcV8oJj b/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config/9v2H5sDBXKmYkGHebfaWwdgBWuMTBVWQom3QeEcV8oJj new file mode 100644 index 000000000..89ef10e90 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config/9v2H5sDBXKmYkGHebfaWwdgBWuMTBVWQom3QeEcV8oJj differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_external_plugin_on_collection/Hvdg2FjMEndC4jxF2MJgKCaj5omLLZ19LNfD4p9oXkpE b/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_external_plugin_on_collection/Hvdg2FjMEndC4jxF2MJgKCaj5omLLZ19LNfD4p9oXkpE new file mode 100644 index 000000000..41eb31b36 Binary files /dev/null and 
b/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_external_plugin_on_collection/Hvdg2FjMEndC4jxF2MJgKCaj5omLLZ19LNfD4p9oXkpE differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_multiple_lifecycle_events/3puHPHUHFXxhS7qPQa5YYTngzPbetoWbu7y2UxxB6xrF b/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_multiple_lifecycle_events/3puHPHUHFXxhS7qPQa5YYTngzPbetoWbu7y2UxxB6xrF new file mode 100644 index 000000000..9c0e136de Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_multiple_lifecycle_events/3puHPHUHFXxhS7qPQa5YYTngzPbetoWbu7y2UxxB6xrF differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_no_offset/2TZpUiBiyMdwLFTKRshVMHK8anQK2W8XXbfUfyxR8yvc b/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_no_offset/2TZpUiBiyMdwLFTKRshVMHK8anQK2W8XXbfUfyxR8yvc new file mode 100644 index 000000000..0d7195553 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_oracle_no_offset/2TZpUiBiyMdwLFTKRshVMHK8anQK2W8XXbfUfyxR8yvc differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_pubkey_in_rule_set/8H71x9Bhh9E9o3MZK4QnVC5MRFn1WZRf2Mc9w2wEbG5V b/integration_tests/src/data/accounts/mpl_core_get_asset_with_pubkey_in_rule_set/8H71x9Bhh9E9o3MZK4QnVC5MRFn1WZRf2Mc9w2wEbG5V new file mode 100644 index 000000000..83f47a28b Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_asset_with_pubkey_in_rule_set/8H71x9Bhh9E9o3MZK4QnVC5MRFn1WZRf2Mc9w2wEbG5V differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_asset_with_two_oracle_external_plugins/4aarnaiMVtGEp5nToRqBEUGtqY2F1gW2V8bBQe1rN5V9 b/integration_tests/src/data/accounts/mpl_core_get_asset_with_two_oracle_external_plugins/4aarnaiMVtGEp5nToRqBEUGtqY2F1gW2V8bBQe1rN5V9 new file mode 100644 index 000000000..cf6ab66c3 Binary files /dev/null and 
b/integration_tests/src/data/accounts/mpl_core_get_asset_with_two_oracle_external_plugins/4aarnaiMVtGEp5nToRqBEUGtqY2F1gW2V8bBQe1rN5V9 differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_assets_by_authority/4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz b/integration_tests/src/data/accounts/mpl_core_get_assets_by_authority/4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz new file mode 100644 index 000000000..dd9b86923 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_authority/4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_assets_by_authority/9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci b/integration_tests/src/data/accounts/mpl_core_get_assets_by_authority/9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci new file mode 100644 index 000000000..59f6c8ce6 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_authority/9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/EgzsppfYJmUet4ve8MnuHMyvSnj6R7LRmwsGEH5TuGhB b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/EgzsppfYJmUet4ve8MnuHMyvSnj6R7LRmwsGEH5TuGhB new file mode 100644 index 000000000..e9adbd832 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/EgzsppfYJmUet4ve8MnuHMyvSnj6R7LRmwsGEH5TuGhB differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/J2kazVRuZ33Po4PVyZGxiDYUMQ1eZiT5Xa13usRYo264 b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/J2kazVRuZ33Po4PVyZGxiDYUMQ1eZiT5Xa13usRYo264 new file mode 100644 index 000000000..4d3efc957 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/J2kazVRuZ33Po4PVyZGxiDYUMQ1eZiT5Xa13usRYo264 differ diff --git 
a/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7 b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7 new file mode 100644 index 000000000..59c0e47c3 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7 differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/kTMCCKLTaZsnSReer12HsciwScUwhHyZyd9D9BwQF8k b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/kTMCCKLTaZsnSReer12HsciwScUwhHyZyd9D9BwQF8k new file mode 100644 index 000000000..aa4ec46ff Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group/kTMCCKLTaZsnSReer12HsciwScUwhHyZyd9D9BwQF8k differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds/CWJDcrzxSDE7FeNRzMK1aSia7qoaUPrrGQ81E7vkQpq4 b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds/CWJDcrzxSDE7FeNRzMK1aSia7qoaUPrrGQ81E7vkQpq4 new file mode 100644 index 000000000..8d3db4a59 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds/CWJDcrzxSDE7FeNRzMK1aSia7qoaUPrrGQ81E7vkQpq4 differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds/Do7rVGmVNa9wjsKNyjoa5phqriLER6HCqUQm5zyoTX3f b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds/Do7rVGmVNa9wjsKNyjoa5phqriLER6HCqUQm5zyoTX3f new file mode 100644 index 000000000..ee7631e9e Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds/Do7rVGmVNa9wjsKNyjoa5phqriLER6HCqUQm5zyoTX3f differ diff --git 
a/integration_tests/src/data/accounts/mpl_core_get_assets_by_owner/4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb b/integration_tests/src/data/accounts/mpl_core_get_assets_by_owner/4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb new file mode 100644 index 000000000..61476fa2d Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_owner/4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_assets_by_owner/9tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj b/integration_tests/src/data/accounts/mpl_core_get_assets_by_owner/9tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj new file mode 100644 index 000000000..cb2d50c8a Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assets_by_owner/9tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_assett/x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr b/integration_tests/src/data/accounts/mpl_core_get_assett/x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr new file mode 100644 index 000000000..c3a4cc12d Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_assett/x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_collection/DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ b/integration_tests/src/data/accounts/mpl_core_get_collection/DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ new file mode 100644 index 000000000..13e66c656 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_collection/DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority/41thppJ4z9HnBNbFMLnztXS7seqBptYV1jG8UhxR4vK8 
b/integration_tests/src/data/accounts/mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority/41thppJ4z9HnBNbFMLnztXS7seqBptYV1jG8UhxR4vK8 new file mode 100644 index 000000000..99cc76126 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority/41thppJ4z9HnBNbFMLnztXS7seqBptYV1jG8UhxR4vK8 differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority/2aUn89GKuSjfYTeCH6GL1Y6CiUYqjvcgZehFGDJbhNeW b/integration_tests/src/data/accounts/mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority/2aUn89GKuSjfYTeCH6GL1Y6CiUYqjvcgZehFGDJbhNeW new file mode 100644 index 000000000..56065103f Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority/2aUn89GKuSjfYTeCH6GL1Y6CiUYqjvcgZehFGDJbhNeW differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority/53q1PCBy5KgzZfoHu6bnLWQFVmJtKyceP8DqNMhXWUaA b/integration_tests/src/data/accounts/mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority/53q1PCBy5KgzZfoHu6bnLWQFVmJtKyceP8DqNMhXWUaA new file mode 100644 index 000000000..bc6924239 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority/53q1PCBy5KgzZfoHu6bnLWQFVmJtKyceP8DqNMhXWUaA differ diff --git a/integration_tests/src/data/accounts/mpl_core_get_collectionn/DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ b/integration_tests/src/data/accounts/mpl_core_get_collectionn/DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ new file mode 100644 index 000000000..a2fe58642 Binary files /dev/null and 
b/integration_tests/src/data/accounts/mpl_core_get_collectionn/DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ differ diff --git a/integration_tests/src/data/accounts/mpl_core_verified_creators_plugin/AGyjcG9mBfYJFMZiJVkXr4iX7re6vkQ1Fw5grukA6Hiu b/integration_tests/src/data/accounts/mpl_core_verified_creators_plugin/AGyjcG9mBfYJFMZiJVkXr4iX7re6vkQ1Fw5grukA6Hiu new file mode 100644 index 000000000..62af76d3e Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_verified_creators_plugin/AGyjcG9mBfYJFMZiJVkXr4iX7re6vkQ1Fw5grukA6Hiu differ diff --git a/integration_tests/src/data/accounts/mpl_core_verified_creators_plugin_with_signature/4iVX1oZj6nLAMerjXFw3UeGD4QU7BEaCscsWqD3zEH37 b/integration_tests/src/data/accounts/mpl_core_verified_creators_plugin_with_signature/4iVX1oZj6nLAMerjXFw3UeGD4QU7BEaCscsWqD3zEH37 new file mode 100644 index 000000000..afea04796 Binary files /dev/null and b/integration_tests/src/data/accounts/mpl_core_verified_creators_plugin_with_signature/4iVX1oZj6nLAMerjXFw3UeGD4QU7BEaCscsWqD3zEH37 differ diff --git a/integration_tests/src/data/accounts/reg_get_asset/52J4tmbP9o3GHQrkts8vhXm84CLzM7KSXboKGnrw3YTW b/integration_tests/src/data/accounts/reg_get_asset/52J4tmbP9o3GHQrkts8vhXm84CLzM7KSXboKGnrw3YTW new file mode 100644 index 000000000..a54f37db0 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset/52J4tmbP9o3GHQrkts8vhXm84CLzM7KSXboKGnrw3YTW differ diff --git a/integration_tests/src/data/accounts/reg_get_asset/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE b/integration_tests/src/data/accounts/reg_get_asset/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE new file mode 100644 index 000000000..669172249 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE differ diff --git a/integration_tests/src/data/accounts/reg_get_asset/Fc1MSqW3eifoMuDupvimtu2DbSYmGUtSQ7wBUoz87uK5 
b/integration_tests/src/data/accounts/reg_get_asset/Fc1MSqW3eifoMuDupvimtu2DbSYmGUtSQ7wBUoz87uK5 new file mode 100644 index 000000000..aa06c326e Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset/Fc1MSqW3eifoMuDupvimtu2DbSYmGUtSQ7wBUoz87uK5 differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_batch/13YBWA4DyLJc8QZPAAajNLefise35GZ2pVG2R97n5NYa b/integration_tests/src/data/accounts/reg_get_asset_batch/13YBWA4DyLJc8QZPAAajNLefise35GZ2pVG2R97n5NYa new file mode 100644 index 000000000..ea6fe39dc Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_batch/13YBWA4DyLJc8QZPAAajNLefise35GZ2pVG2R97n5NYa differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_batch/2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd b/integration_tests/src/data/accounts/reg_get_asset_batch/2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd new file mode 100644 index 000000000..5a04b7c77 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_batch/2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_batch/2iMQVnGpJfSRfyPSyDgnnGMwiCa9ap5j29VZvHuoABpp b/integration_tests/src/data/accounts/reg_get_asset_batch/2iMQVnGpJfSRfyPSyDgnnGMwiCa9ap5j29VZvHuoABpp new file mode 100644 index 000000000..082dfc494 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_batch/2iMQVnGpJfSRfyPSyDgnnGMwiCa9ap5j29VZvHuoABpp differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_batch/5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1 b/integration_tests/src/data/accounts/reg_get_asset_batch/5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1 new file mode 100644 index 000000000..73a594c5c Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_batch/5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1 differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_batch/AhbbU3exDvvM9cBzHxEjpYt4sMfFsXuQ3FUXv9VPE4F7 
b/integration_tests/src/data/accounts/reg_get_asset_batch/AhbbU3exDvvM9cBzHxEjpYt4sMfFsXuQ3FUXv9VPE4F7 new file mode 100644 index 000000000..9cd0184fd Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_batch/AhbbU3exDvvM9cBzHxEjpYt4sMfFsXuQ3FUXv9VPE4F7 differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_batch/CxYMsK8m52REmAv8mre7TNJx2QLfHnVEdhnt4vht37q4 b/integration_tests/src/data/accounts/reg_get_asset_batch/CxYMsK8m52REmAv8mre7TNJx2QLfHnVEdhnt4vht37q4 new file mode 100644 index 000000000..046932d30 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_batch/CxYMsK8m52REmAv8mre7TNJx2QLfHnVEdhnt4vht37q4 differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_batch/EjkMvnYTrmpAWLVz55HHL3UxcZcPYwiSooLNPheLP1kk b/integration_tests/src/data/accounts/reg_get_asset_batch/EjkMvnYTrmpAWLVz55HHL3UxcZcPYwiSooLNPheLP1kk new file mode 100644 index 000000000..13e128c71 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_batch/EjkMvnYTrmpAWLVz55HHL3UxcZcPYwiSooLNPheLP1kk differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_batch/HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK b/integration_tests/src/data/accounts/reg_get_asset_batch/HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK new file mode 100644 index 000000000..e3257069d Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_batch/HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_batch/cpdh48eio3EZFc3rZw7HR6P3thRr22qbcNXtvC9dC18 b/integration_tests/src/data/accounts/reg_get_asset_batch/cpdh48eio3EZFc3rZw7HR6P3thRr22qbcNXtvC9dC18 new file mode 100644 index 000000000..b769d8d4b Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_batch/cpdh48eio3EZFc3rZw7HR6P3thRr22qbcNXtvC9dC18 differ diff --git 
a/integration_tests/src/data/accounts/reg_get_asset_by_group/4uTRch4H3kVSWdrQjUe1W5jEoEAxvw9KpYDGRSWUdeW9 b/integration_tests/src/data/accounts/reg_get_asset_by_group/4uTRch4H3kVSWdrQjUe1W5jEoEAxvw9KpYDGRSWUdeW9 new file mode 100644 index 000000000..f7c0ea54f Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_by_group/4uTRch4H3kVSWdrQjUe1W5jEoEAxvw9KpYDGRSWUdeW9 differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_by_group/5yF7HbXnjAy2BEq2EC3twan5yuVVXiAx22gbKy5Zi5qk b/integration_tests/src/data/accounts/reg_get_asset_by_group/5yF7HbXnjAy2BEq2EC3twan5yuVVXiAx22gbKy5Zi5qk new file mode 100644 index 000000000..742f01010 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_by_group/5yF7HbXnjAy2BEq2EC3twan5yuVVXiAx22gbKy5Zi5qk differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_by_group/7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W b/integration_tests/src/data/accounts/reg_get_asset_by_group/7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W new file mode 100644 index 000000000..8ad0f5e51 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_by_group/7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_by_group/7om2zA1iCQ3FCm8pTUBTHi8M9p2oVai4FsxJFN8asgjg b/integration_tests/src/data/accounts/reg_get_asset_by_group/7om2zA1iCQ3FCm8pTUBTHi8M9p2oVai4FsxJFN8asgjg new file mode 100644 index 000000000..c5a542f94 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_by_group/7om2zA1iCQ3FCm8pTUBTHi8M9p2oVai4FsxJFN8asgjg differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_by_group/9B5ygnnZno5UnSuM14feYqCKESRawPfVnVc1CgcAR9QH b/integration_tests/src/data/accounts/reg_get_asset_by_group/9B5ygnnZno5UnSuM14feYqCKESRawPfVnVc1CgcAR9QH new file mode 100644 index 000000000..431ec8be7 Binary files /dev/null and 
b/integration_tests/src/data/accounts/reg_get_asset_by_group/9B5ygnnZno5UnSuM14feYqCKESRawPfVnVc1CgcAR9QH differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_by_group/BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi b/integration_tests/src/data/accounts/reg_get_asset_by_group/BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi new file mode 100644 index 000000000..aac046093 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_by_group/BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_by_group/Eft9Evw7Jqd5ZeDhpTXHbpv5ZrZ3tvc1paSXAqHd51cy b/integration_tests/src/data/accounts/reg_get_asset_by_group/Eft9Evw7Jqd5ZeDhpTXHbpv5ZrZ3tvc1paSXAqHd51cy new file mode 100644 index 000000000..033b52062 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_by_group/Eft9Evw7Jqd5ZeDhpTXHbpv5ZrZ3tvc1paSXAqHd51cy differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_by_group/Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie b/integration_tests/src/data/accounts/reg_get_asset_by_group/Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie new file mode 100644 index 000000000..0a6f3855f Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_by_group/Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie differ diff --git a/integration_tests/src/data/accounts/reg_get_asset_by_group/GcvCfh1V4YYUwDLs2KBKxFxKggLNxaUSWkSxnpDCRUVA b/integration_tests/src/data/accounts/reg_get_asset_by_group/GcvCfh1V4YYUwDLs2KBKxFxKggLNxaUSWkSxnpDCRUVA new file mode 100644 index 000000000..0ed721dee Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_asset_by_group/GcvCfh1V4YYUwDLs2KBKxFxKggLNxaUSWkSxnpDCRUVA differ diff --git a/integration_tests/src/data/accounts/reg_get_assett/52J4tmbP9o3GHQrkts8vhXm84CLzM7KSXboKGnrw3YTW b/integration_tests/src/data/accounts/reg_get_assett/52J4tmbP9o3GHQrkts8vhXm84CLzM7KSXboKGnrw3YTW new file mode 100644 index 
000000000..a3a7d051d Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_assett/52J4tmbP9o3GHQrkts8vhXm84CLzM7KSXboKGnrw3YTW differ diff --git a/integration_tests/src/data/accounts/reg_get_assett/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE b/integration_tests/src/data/accounts/reg_get_assett/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE new file mode 100644 index 000000000..2778e0dad Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_assett/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE differ diff --git a/integration_tests/src/data/accounts/reg_get_assett/Fc1MSqW3eifoMuDupvimtu2DbSYmGUtSQ7wBUoz87uK5 b/integration_tests/src/data/accounts/reg_get_assett/Fc1MSqW3eifoMuDupvimtu2DbSYmGUtSQ7wBUoz87uK5 new file mode 100644 index 000000000..b226bec40 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_get_assett/Fc1MSqW3eifoMuDupvimtu2DbSYmGUtSQ7wBUoz87uK5 differ diff --git a/integration_tests/src/data/accounts/reg_search_assets/2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2 b/integration_tests/src/data/accounts/reg_search_assets/2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2 new file mode 100644 index 000000000..002bd8814 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_search_assets/2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2 differ diff --git a/integration_tests/src/data/accounts/reg_search_assets/398rsAE6qxtkMPdMBqpVz7Ca52BdQrvPLs95pYrPRqnB b/integration_tests/src/data/accounts/reg_search_assets/398rsAE6qxtkMPdMBqpVz7Ca52BdQrvPLs95pYrPRqnB new file mode 100644 index 000000000..d2b9902b8 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_search_assets/398rsAE6qxtkMPdMBqpVz7Ca52BdQrvPLs95pYrPRqnB differ diff --git a/integration_tests/src/data/accounts/reg_search_assets/3XePvcrVVbGNKNuK7jrtAfeK8KUjCpXebL95RqGceYP8 b/integration_tests/src/data/accounts/reg_search_assets/3XePvcrVVbGNKNuK7jrtAfeK8KUjCpXebL95RqGceYP8 new file mode 100644 index 000000000..49b6c5a47 Binary files 
/dev/null and b/integration_tests/src/data/accounts/reg_search_assets/3XePvcrVVbGNKNuK7jrtAfeK8KUjCpXebL95RqGceYP8 differ diff --git a/integration_tests/src/data/accounts/reg_search_assets/7DS81LourvNXo5PiWZp6G3vaSHZhY1rxT9VpG8YuFBiq b/integration_tests/src/data/accounts/reg_search_assets/7DS81LourvNXo5PiWZp6G3vaSHZhY1rxT9VpG8YuFBiq new file mode 100644 index 000000000..0640eec9c Binary files /dev/null and b/integration_tests/src/data/accounts/reg_search_assets/7DS81LourvNXo5PiWZp6G3vaSHZhY1rxT9VpG8YuFBiq differ diff --git a/integration_tests/src/data/accounts/reg_search_assets/A7gxL4pMsck1WuwfZx4XhnL8FVBBzuameBcQnX45gWeq b/integration_tests/src/data/accounts/reg_search_assets/A7gxL4pMsck1WuwfZx4XhnL8FVBBzuameBcQnX45gWeq new file mode 100644 index 000000000..687cfc543 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_search_assets/A7gxL4pMsck1WuwfZx4XhnL8FVBBzuameBcQnX45gWeq differ diff --git a/integration_tests/src/data/accounts/reg_search_assets/Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL b/integration_tests/src/data/accounts/reg_search_assets/Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL new file mode 100644 index 000000000..51792b5e2 Binary files /dev/null and b/integration_tests/src/data/accounts/reg_search_assets/Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL differ diff --git a/integration_tests/src/data/largest_token_account_ids/2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd/2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd b/integration_tests/src/data/largest_token_account_ids/2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd/2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd new file mode 100644 index 000000000..f2691f365 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd/2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd @@ -0,0 +1 @@ +±ªõ•Oé%ú› 5=¶<ã‹BbáZ#Òhû \ No newline at end of file diff --git 
a/integration_tests/src/data/largest_token_account_ids/2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2/2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2 b/integration_tests/src/data/largest_token_account_ids/2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2/2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2 new file mode 100644 index 000000000..4dc65b878 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2/2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2 @@ -0,0 +1 @@ +%‘ÀÞôÖ+[ƒþ`Úûcu¢”…‰‡ëáP¾U3¹ Ù· \ No newline at end of file diff --git a/integration_tests/src/data/largest_token_account_ids/5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1/5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1 b/integration_tests/src/data/largest_token_account_ids/5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1/5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1 new file mode 100644 index 000000000..db364f166 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1/5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1 @@ -0,0 +1 @@ +‡Ñq œv5.²hÂäæ.|§tz‹¶W'as(D~Ž \ No newline at end of file diff --git a/integration_tests/src/data/largest_token_account_ids/7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W/7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W b/integration_tests/src/data/largest_token_account_ids/7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W/7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W new file mode 100644 index 000000000..dded31ff5 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W/7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W @@ -0,0 +1 @@ +ËÓÊÀF+761ŸÍòtW¹‚Ë™5Ýaàðõ56I¯† \ No newline at end of file diff --git a/integration_tests/src/data/largest_token_account_ids/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f 
b/integration_tests/src/data/largest_token_account_ids/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f new file mode 100644 index 000000000..15185d084 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f/843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f @@ -0,0 +1 @@ +·ú–-Îh™Þ„È9⦠~íÃŒ LÞÒ>•,ê¡k³ \ No newline at end of file diff --git a/integration_tests/src/data/largest_token_account_ids/ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk/ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk b/integration_tests/src/data/largest_token_account_ids/ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk/ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk new file mode 100644 index 000000000..584f9f841 Binary files /dev/null and b/integration_tests/src/data/largest_token_account_ids/ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk/ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk differ diff --git a/integration_tests/src/data/largest_token_account_ids/Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8/Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8 b/integration_tests/src/data/largest_token_account_ids/Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8/Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8 new file mode 100644 index 000000000..14027b7ca --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8/Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8 @@ -0,0 +1 @@ +•´Ó‰àó¤Áös{xc»º…Âü—Îö<†K¬ª×Ödz \ No newline at end of file diff --git a/integration_tests/src/data/largest_token_account_ids/BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi/BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi b/integration_tests/src/data/largest_token_account_ids/BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi/BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi new file mode 100644 index 000000000..251d00821 --- /dev/null +++ 
b/integration_tests/src/data/largest_token_account_ids/BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi/BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi @@ -0,0 +1 @@ +I×9´:+HÇ{žf¡Ø²Ã©™é^h”ÃÈäö]ž7ƒ \ No newline at end of file diff --git a/integration_tests/src/data/largest_token_account_ids/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE b/integration_tests/src/data/largest_token_account_ids/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE new file mode 100644 index 000000000..1f698e0b5 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE/CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE @@ -0,0 +1,2 @@ +ØûXÂP|ÇÞº½†Ñûçù få +²‘‚‹ «8S9h \ No newline at end of file diff --git a/integration_tests/src/data/largest_token_account_ids/Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL/Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL b/integration_tests/src/data/largest_token_account_ids/Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL/Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL new file mode 100644 index 000000000..8cb0a8835 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL/Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL @@ -0,0 +1 @@ +\U~Ôçi¨JY–I4‹qîòñWï«m¦ŸCG…­_ò \ No newline at end of file diff --git a/integration_tests/src/data/largest_token_account_ids/Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie/Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie b/integration_tests/src/data/largest_token_account_ids/Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie/Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie new file mode 100644 index 000000000..734685480 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie/Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie @@ -0,0 +1 @@ +:$ÆïÜɆt(™¼¢«ª¡;!ÓÖ,ç«´0?¡~ \ No newline at end of file diff --git 
a/integration_tests/src/data/largest_token_account_ids/HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK/HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK b/integration_tests/src/data/largest_token_account_ids/HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK/HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK new file mode 100644 index 000000000..13cdc4193 Binary files /dev/null and b/integration_tests/src/data/largest_token_account_ids/HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK/HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK differ diff --git a/integration_tests/src/data/transactions/asset_decompress/3Ct9n9hv5PWEYbsrgDdUDqegzsnX2n5jYRxkq5YafFAueup8mTYmN4nHhNCaEwVyVAVqNssr4fizdg9wRavT7ydE b/integration_tests/src/data/transactions/asset_decompress/3Ct9n9hv5PWEYbsrgDdUDqegzsnX2n5jYRxkq5YafFAueup8mTYmN4nHhNCaEwVyVAVqNssr4fizdg9wRavT7ydE new file mode 100644 index 000000000..015b543df Binary files /dev/null and b/integration_tests/src/data/transactions/asset_decompress/3Ct9n9hv5PWEYbsrgDdUDqegzsnX2n5jYRxkq5YafFAueup8mTYmN4nHhNCaEwVyVAVqNssr4fizdg9wRavT7ydE differ diff --git a/integration_tests/src/data/transactions/asset_decompress/4FQRV38NSP6gDo8qDbTBfy8UDHUd6Lzu4GXbHtfvWbtCArkVcbGQwinZ7M61eCmPEF5L8xu4tLAXL7ozbh5scfRi b/integration_tests/src/data/transactions/asset_decompress/4FQRV38NSP6gDo8qDbTBfy8UDHUd6Lzu4GXbHtfvWbtCArkVcbGQwinZ7M61eCmPEF5L8xu4tLAXL7ozbh5scfRi new file mode 100644 index 000000000..ca99e06a4 Binary files /dev/null and b/integration_tests/src/data/transactions/asset_decompress/4FQRV38NSP6gDo8qDbTBfy8UDHUd6Lzu4GXbHtfvWbtCArkVcbGQwinZ7M61eCmPEF5L8xu4tLAXL7ozbh5scfRi differ diff --git a/integration_tests/src/data/transactions/asset_decompress/55tQCoLUtHyu4i6Dny6SMdq4dVD61nuuLxXvRLeeQqE6xdm66Ajm4so39MXcJ2VaTmCNDEFBpitzLkiFaF7rNtHi b/integration_tests/src/data/transactions/asset_decompress/55tQCoLUtHyu4i6Dny6SMdq4dVD61nuuLxXvRLeeQqE6xdm66Ajm4so39MXcJ2VaTmCNDEFBpitzLkiFaF7rNtHi new file mode 100644 index 000000000..73c54cb19 Binary files /dev/null and 
b/integration_tests/src/data/transactions/asset_decompress/55tQCoLUtHyu4i6Dny6SMdq4dVD61nuuLxXvRLeeQqE6xdm66Ajm4so39MXcJ2VaTmCNDEFBpitzLkiFaF7rNtHi differ diff --git a/integration_tests/src/data/transactions/cnft_owners_table/25djDqCTka7wEnNMRVwsqSsVHqQMknPReTUCmvF4XGD9bUD494jZ1FsPaPjbAK45TxpdVuF2RwVCK9Jq7oxZAtMB b/integration_tests/src/data/transactions/cnft_owners_table/25djDqCTka7wEnNMRVwsqSsVHqQMknPReTUCmvF4XGD9bUD494jZ1FsPaPjbAK45TxpdVuF2RwVCK9Jq7oxZAtMB new file mode 100644 index 000000000..4f57eac09 Binary files /dev/null and b/integration_tests/src/data/transactions/cnft_owners_table/25djDqCTka7wEnNMRVwsqSsVHqQMknPReTUCmvF4XGD9bUD494jZ1FsPaPjbAK45TxpdVuF2RwVCK9Jq7oxZAtMB differ diff --git a/integration_tests/src/data/transactions/cnft_owners_table/3UrxyfoJKH2jvVkzZZuCMXxtyaFUtgjhoQmirwhyiFjZXA8oM3QCBixCSBj9b53t5scvsm3qpuq5Qm4cGbNuwQP7 b/integration_tests/src/data/transactions/cnft_owners_table/3UrxyfoJKH2jvVkzZZuCMXxtyaFUtgjhoQmirwhyiFjZXA8oM3QCBixCSBj9b53t5scvsm3qpuq5Qm4cGbNuwQP7 new file mode 100644 index 000000000..ca454df25 Binary files /dev/null and b/integration_tests/src/data/transactions/cnft_owners_table/3UrxyfoJKH2jvVkzZZuCMXxtyaFUtgjhoQmirwhyiFjZXA8oM3QCBixCSBj9b53t5scvsm3qpuq5Qm4cGbNuwQP7 differ diff --git a/integration_tests/src/data/transactions/cnft_owners_table/4fzBjTaXmrrJReLLSYPzn1fhPfuiU2EU1hGUddtHV1B49pvRewGyyzvMMpssi7K4Y5ZYj5xS9DrJuxqJDZRMZqY1 b/integration_tests/src/data/transactions/cnft_owners_table/4fzBjTaXmrrJReLLSYPzn1fhPfuiU2EU1hGUddtHV1B49pvRewGyyzvMMpssi7K4Y5ZYj5xS9DrJuxqJDZRMZqY1 new file mode 100644 index 000000000..6520e2dc0 Binary files /dev/null and b/integration_tests/src/data/transactions/cnft_owners_table/4fzBjTaXmrrJReLLSYPzn1fhPfuiU2EU1hGUddtHV1B49pvRewGyyzvMMpssi7K4Y5ZYj5xS9DrJuxqJDZRMZqY1 differ diff --git a/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata/2DP84v6Pi3e4v5i7KSvzmK4Ufbzof3TAiEqDbm9gg8jZpBRF9f1Cy6x54kvZoHPX9k1XfqbsG1FTv2KVP9fvNrN6 
b/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata/2DP84v6Pi3e4v5i7KSvzmK4Ufbzof3TAiEqDbm9gg8jZpBRF9f1Cy6x54kvZoHPX9k1XfqbsG1FTv2KVP9fvNrN6 new file mode 100644 index 000000000..a25f1b837 Binary files /dev/null and b/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata/2DP84v6Pi3e4v5i7KSvzmK4Ufbzof3TAiEqDbm9gg8jZpBRF9f1Cy6x54kvZoHPX9k1XfqbsG1FTv2KVP9fvNrN6 differ diff --git a/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata/3bsL5zmLKvhN9Je4snTKxjFSpmXEEg2cvMHm2rCNgaEYkNXBqJTA4N7QmvBSWPiNUQPtzJSYzpQYX92NowV3L7vN b/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata/3bsL5zmLKvhN9Je4snTKxjFSpmXEEg2cvMHm2rCNgaEYkNXBqJTA4N7QmvBSWPiNUQPtzJSYzpQYX92NowV3L7vN new file mode 100644 index 000000000..0223d8bcf Binary files /dev/null and b/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata/3bsL5zmLKvhN9Je4snTKxjFSpmXEEg2cvMHm2rCNgaEYkNXBqJTA4N7QmvBSWPiNUQPtzJSYzpQYX92NowV3L7vN differ diff --git a/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata_remove_creators/2qMQrXfRE7pdnjwobWeqDkEhsv6MYmv3JdgvNxTVaL1VrMCZ4JYkUnu7jiJb2etX3W9WyQgSxktUgn9skxCeqTo5 b/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata_remove_creators/2qMQrXfRE7pdnjwobWeqDkEhsv6MYmv3JdgvNxTVaL1VrMCZ4JYkUnu7jiJb2etX3W9WyQgSxktUgn9skxCeqTo5 new file mode 100644 index 000000000..bda639523 Binary files /dev/null and b/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata_remove_creators/2qMQrXfRE7pdnjwobWeqDkEhsv6MYmv3JdgvNxTVaL1VrMCZ4JYkUnu7jiJb2etX3W9WyQgSxktUgn9skxCeqTo5 differ diff --git a/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata_remove_creators/41YW187sn6Z2dXfqz6zSbnPtQoE826cCSgTLnMLKa9rH1xrCqAXBQNwKnzjGc9wjU5RtMCqKhy2eMN2TjuYC8veB 
b/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata_remove_creators/41YW187sn6Z2dXfqz6zSbnPtQoE826cCSgTLnMLKa9rH1xrCqAXBQNwKnzjGc9wjU5RtMCqKhy2eMN2TjuYC8veB new file mode 100644 index 000000000..fc26c68a9 Binary files /dev/null and b/integration_tests/src/data/transactions/cnft_scenario_mint_update_metadata_remove_creators/41YW187sn6Z2dXfqz6zSbnPtQoE826cCSgTLnMLKa9rH1xrCqAXBQNwKnzjGc9wjU5RtMCqKhy2eMN2TjuYC8veB differ diff --git a/integration_tests/src/data/transactions/mint_delegate_transfer/3B1sASkuToCWuGFRG47axQDm1SpgLi8qDDGnRFeR7LB6oa5C3ZmkEuX98373gdMTBXED44FkwT227kBBAGSw7e8M b/integration_tests/src/data/transactions/mint_delegate_transfer/3B1sASkuToCWuGFRG47axQDm1SpgLi8qDDGnRFeR7LB6oa5C3ZmkEuX98373gdMTBXED44FkwT227kBBAGSw7e8M new file mode 100644 index 000000000..69de59cad Binary files /dev/null and b/integration_tests/src/data/transactions/mint_delegate_transfer/3B1sASkuToCWuGFRG47axQDm1SpgLi8qDDGnRFeR7LB6oa5C3ZmkEuX98373gdMTBXED44FkwT227kBBAGSw7e8M differ diff --git a/integration_tests/src/data/transactions/mint_delegate_transfer/5Q8TAMMkMTHEM2BHyD2fp2sVdYKByFeATzM2mHF6Xbbar33WaeuygPKGYCWiDEt3MZU1mUrq1ePnT9o4Pa318p8w b/integration_tests/src/data/transactions/mint_delegate_transfer/5Q8TAMMkMTHEM2BHyD2fp2sVdYKByFeATzM2mHF6Xbbar33WaeuygPKGYCWiDEt3MZU1mUrq1ePnT9o4Pa318p8w new file mode 100644 index 000000000..666b8b685 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_delegate_transfer/5Q8TAMMkMTHEM2BHyD2fp2sVdYKByFeATzM2mHF6Xbbar33WaeuygPKGYCWiDEt3MZU1mUrq1ePnT9o4Pa318p8w differ diff --git a/integration_tests/src/data/transactions/mint_delegate_transfer/KNWsAYPo3mm1HuFxRyEwBBMUZ2hqTnFXjoPVFo7WxGTfmfRwz6K8eERc4dnJpHyuoDkAZu1czK55iB1SbtCsdW2 b/integration_tests/src/data/transactions/mint_delegate_transfer/KNWsAYPo3mm1HuFxRyEwBBMUZ2hqTnFXjoPVFo7WxGTfmfRwz6K8eERc4dnJpHyuoDkAZu1czK55iB1SbtCsdW2 new file mode 100644 index 000000000..78a48c184 Binary files /dev/null and 
b/integration_tests/src/data/transactions/mint_delegate_transfer/KNWsAYPo3mm1HuFxRyEwBBMUZ2hqTnFXjoPVFo7WxGTfmfRwz6K8eERc4dnJpHyuoDkAZu1czK55iB1SbtCsdW2 differ diff --git a/integration_tests/src/data/transactions/mint_no_json_uri/4ASu45ELoTmvwhNqokGQrh2VH8p5zeUepYLbkcULMeXSCZJGrJa7ojgdVh5JUxBjAMF9Lrp55EgUUFPaPeWKejNQ b/integration_tests/src/data/transactions/mint_no_json_uri/4ASu45ELoTmvwhNqokGQrh2VH8p5zeUepYLbkcULMeXSCZJGrJa7ojgdVh5JUxBjAMF9Lrp55EgUUFPaPeWKejNQ new file mode 100644 index 000000000..566aab62a Binary files /dev/null and b/integration_tests/src/data/transactions/mint_no_json_uri/4ASu45ELoTmvwhNqokGQrh2VH8p5zeUepYLbkcULMeXSCZJGrJa7ojgdVh5JUxBjAMF9Lrp55EgUUFPaPeWKejNQ differ diff --git a/integration_tests/src/data/transactions/mint_redeem/3Ct9n9hv5PWEYbsrgDdUDqegzsnX2n5jYRxkq5YafFAueup8mTYmN4nHhNCaEwVyVAVqNssr4fizdg9wRavT7ydE b/integration_tests/src/data/transactions/mint_redeem/3Ct9n9hv5PWEYbsrgDdUDqegzsnX2n5jYRxkq5YafFAueup8mTYmN4nHhNCaEwVyVAVqNssr4fizdg9wRavT7ydE new file mode 100644 index 000000000..ede4fe3ec Binary files /dev/null and b/integration_tests/src/data/transactions/mint_redeem/3Ct9n9hv5PWEYbsrgDdUDqegzsnX2n5jYRxkq5YafFAueup8mTYmN4nHhNCaEwVyVAVqNssr4fizdg9wRavT7ydE differ diff --git a/integration_tests/src/data/transactions/mint_redeem/4FQRV38NSP6gDo8qDbTBfy8UDHUd6Lzu4GXbHtfvWbtCArkVcbGQwinZ7M61eCmPEF5L8xu4tLAXL7ozbh5scfRi b/integration_tests/src/data/transactions/mint_redeem/4FQRV38NSP6gDo8qDbTBfy8UDHUd6Lzu4GXbHtfvWbtCArkVcbGQwinZ7M61eCmPEF5L8xu4tLAXL7ozbh5scfRi new file mode 100644 index 000000000..ef1c09b03 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_redeem/4FQRV38NSP6gDo8qDbTBfy8UDHUd6Lzu4GXbHtfvWbtCArkVcbGQwinZ7M61eCmPEF5L8xu4tLAXL7ozbh5scfRi differ diff --git a/integration_tests/src/data/transactions/mint_redeem/55tQCoLUtHyu4i6Dny6SMdq4dVD61nuuLxXvRLeeQqE6xdm66Ajm4so39MXcJ2VaTmCNDEFBpitzLkiFaF7rNtHi 
b/integration_tests/src/data/transactions/mint_redeem/55tQCoLUtHyu4i6Dny6SMdq4dVD61nuuLxXvRLeeQqE6xdm66Ajm4so39MXcJ2VaTmCNDEFBpitzLkiFaF7rNtHi new file mode 100644 index 000000000..695e5a280 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_redeem/55tQCoLUtHyu4i6Dny6SMdq4dVD61nuuLxXvRLeeQqE6xdm66Ajm4so39MXcJ2VaTmCNDEFBpitzLkiFaF7rNtHi differ diff --git a/integration_tests/src/data/transactions/mint_redeem_cancel_redeem/32FpSe6r9jnFNjjvbx2PPQdZqs5KpMoF6yawiRW1F6ctu1kmx2B4sLDBGjsthVQtmnhaJVrqdtmUP893FwXCbqY5 b/integration_tests/src/data/transactions/mint_redeem_cancel_redeem/32FpSe6r9jnFNjjvbx2PPQdZqs5KpMoF6yawiRW1F6ctu1kmx2B4sLDBGjsthVQtmnhaJVrqdtmUP893FwXCbqY5 new file mode 100644 index 000000000..d439c59f2 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_redeem_cancel_redeem/32FpSe6r9jnFNjjvbx2PPQdZqs5KpMoF6yawiRW1F6ctu1kmx2B4sLDBGjsthVQtmnhaJVrqdtmUP893FwXCbqY5 differ diff --git a/integration_tests/src/data/transactions/mint_redeem_cancel_redeem/3uzWoVgLGVd9cGXaF3JW7znpWgKse3obCa2Vvdoe59kaziX84mEXTwecUoZ49PkJDjReRMSXksKzyfj7pf3ekAGR b/integration_tests/src/data/transactions/mint_redeem_cancel_redeem/3uzWoVgLGVd9cGXaF3JW7znpWgKse3obCa2Vvdoe59kaziX84mEXTwecUoZ49PkJDjReRMSXksKzyfj7pf3ekAGR new file mode 100644 index 000000000..5a1694149 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_redeem_cancel_redeem/3uzWoVgLGVd9cGXaF3JW7znpWgKse3obCa2Vvdoe59kaziX84mEXTwecUoZ49PkJDjReRMSXksKzyfj7pf3ekAGR differ diff --git a/integration_tests/src/data/transactions/mint_redeem_cancel_redeem/49bJ8U3cK9htmLvA1mhXXcjKdpV2YN5JQBrb3Quh7wxENz1BP9F8fE9CKsje41aMbZwzgomnkXirKx2Xpdvprtak b/integration_tests/src/data/transactions/mint_redeem_cancel_redeem/49bJ8U3cK9htmLvA1mhXXcjKdpV2YN5JQBrb3Quh7wxENz1BP9F8fE9CKsje41aMbZwzgomnkXirKx2Xpdvprtak new file mode 100644 index 000000000..4a073e5de Binary files /dev/null and 
b/integration_tests/src/data/transactions/mint_redeem_cancel_redeem/49bJ8U3cK9htmLvA1mhXXcjKdpV2YN5JQBrb3Quh7wxENz1BP9F8fE9CKsje41aMbZwzgomnkXirKx2Xpdvprtak differ diff --git a/integration_tests/src/data/transactions/mint_to_collection_unverify_collection/7nK9a2DSDZ4Gh6DatmxGJmuLiDEswaY9bYSSPTtQppk7PtLKXYE84jWzm7AC4G1fpa831GaXuXcn5n5ybWqB4e5 b/integration_tests/src/data/transactions/mint_to_collection_unverify_collection/7nK9a2DSDZ4Gh6DatmxGJmuLiDEswaY9bYSSPTtQppk7PtLKXYE84jWzm7AC4G1fpa831GaXuXcn5n5ybWqB4e5 new file mode 100644 index 000000000..48d77a3b6 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_to_collection_unverify_collection/7nK9a2DSDZ4Gh6DatmxGJmuLiDEswaY9bYSSPTtQppk7PtLKXYE84jWzm7AC4G1fpa831GaXuXcn5n5ybWqB4e5 differ diff --git a/integration_tests/src/data/transactions/mint_to_collection_unverify_collection/tzXASugk8578bmtA3JAFQLEfcVQp3Np3rU9fyFas2Svk8nyBHXJnf7PdqebGNsSTwx6CEWpDCP5oLoCDcmbP35B b/integration_tests/src/data/transactions/mint_to_collection_unverify_collection/tzXASugk8578bmtA3JAFQLEfcVQp3Np3rU9fyFas2Svk8nyBHXJnf7PdqebGNsSTwx6CEWpDCP5oLoCDcmbP35B new file mode 100644 index 000000000..24a6aaff6 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_to_collection_unverify_collection/tzXASugk8578bmtA3JAFQLEfcVQp3Np3rU9fyFas2Svk8nyBHXJnf7PdqebGNsSTwx6CEWpDCP5oLoCDcmbP35B differ diff --git a/integration_tests/src/data/transactions/mint_transfer_burn/5coWPFty37s7haT3SVyMf6PkTaABEnhCRhfDjXeMNS58czHB5dCFPY6VrsZNwxBnqypmNic1LbLp1j5qjbdnZAc8 b/integration_tests/src/data/transactions/mint_transfer_burn/5coWPFty37s7haT3SVyMf6PkTaABEnhCRhfDjXeMNS58czHB5dCFPY6VrsZNwxBnqypmNic1LbLp1j5qjbdnZAc8 new file mode 100644 index 000000000..2fab26b71 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_burn/5coWPFty37s7haT3SVyMf6PkTaABEnhCRhfDjXeMNS58czHB5dCFPY6VrsZNwxBnqypmNic1LbLp1j5qjbdnZAc8 differ diff --git 
a/integration_tests/src/data/transactions/mint_transfer_burn/KHNhLijkAMeKeKm6kpbk3go6q9uMF3zmfCoYSBgERe8qJDW8q5ANpnkyBuyVkychXCeWzRY8i5EtKfeGaDDU23w b/integration_tests/src/data/transactions/mint_transfer_burn/KHNhLijkAMeKeKm6kpbk3go6q9uMF3zmfCoYSBgERe8qJDW8q5ANpnkyBuyVkychXCeWzRY8i5EtKfeGaDDU23w new file mode 100644 index 000000000..3e2026e46 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_burn/KHNhLijkAMeKeKm6kpbk3go6q9uMF3zmfCoYSBgERe8qJDW8q5ANpnkyBuyVkychXCeWzRY8i5EtKfeGaDDU23w differ diff --git a/integration_tests/src/data/transactions/mint_transfer_burn/k6jmJcurgBQ6F2bVa86Z1vGb7ievzxwRZ8GAqzFEG8HicDizxceYPUm1KTzWZ3QKtGgy1EuFWUGCRqBeKU9SAoJ b/integration_tests/src/data/transactions/mint_transfer_burn/k6jmJcurgBQ6F2bVa86Z1vGb7ievzxwRZ8GAqzFEG8HicDizxceYPUm1KTzWZ3QKtGgy1EuFWUGCRqBeKU9SAoJ new file mode 100644 index 000000000..acd42a476 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_burn/k6jmJcurgBQ6F2bVa86Z1vGb7ievzxwRZ8GAqzFEG8HicDizxceYPUm1KTzWZ3QKtGgy1EuFWUGCRqBeKU9SAoJ differ diff --git a/integration_tests/src/data/transactions/mint_transfer_mpl_programs/3iJ6XzhUXxGQYEEUnfkbZGdrkgS2o9vXUpsXALet3Co6sFQ2h7J21J4dTgSka8qoKiUFUzrXZFHfkqss1VFivnAG b/integration_tests/src/data/transactions/mint_transfer_mpl_programs/3iJ6XzhUXxGQYEEUnfkbZGdrkgS2o9vXUpsXALet3Co6sFQ2h7J21J4dTgSka8qoKiUFUzrXZFHfkqss1VFivnAG new file mode 100644 index 000000000..a7f275e45 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_mpl_programs/3iJ6XzhUXxGQYEEUnfkbZGdrkgS2o9vXUpsXALet3Co6sFQ2h7J21J4dTgSka8qoKiUFUzrXZFHfkqss1VFivnAG differ diff --git a/integration_tests/src/data/transactions/mint_transfer_mpl_programs/4gV14HQBm8GCXjSTHEXjrhUNGmsBiyNdWY9hhCapH9cshmqbPKxn2kUU1XbajZ9j1Pxng95onzR6dx5bYqxQRh2a b/integration_tests/src/data/transactions/mint_transfer_mpl_programs/4gV14HQBm8GCXjSTHEXjrhUNGmsBiyNdWY9hhCapH9cshmqbPKxn2kUU1XbajZ9j1Pxng95onzR6dx5bYqxQRh2a new file mode 100644 index 
000000000..df77336de Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_mpl_programs/4gV14HQBm8GCXjSTHEXjrhUNGmsBiyNdWY9hhCapH9cshmqbPKxn2kUU1XbajZ9j1Pxng95onzR6dx5bYqxQRh2a differ diff --git a/integration_tests/src/data/transactions/mint_transfer_mpl_programs/T571TWE76frw6mWxYoHDrTdxYq7hJSyCtVEG4qmemPPtsc1CCKdknn9rTMAVcdeukLfwB1G97LZLH8eHLvuByoA b/integration_tests/src/data/transactions/mint_transfer_mpl_programs/T571TWE76frw6mWxYoHDrTdxYq7hJSyCtVEG4qmemPPtsc1CCKdknn9rTMAVcdeukLfwB1G97LZLH8eHLvuByoA new file mode 100644 index 000000000..e6d08dea4 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_mpl_programs/T571TWE76frw6mWxYoHDrTdxYq7hJSyCtVEG4qmemPPtsc1CCKdknn9rTMAVcdeukLfwB1G97LZLH8eHLvuByoA differ diff --git a/integration_tests/src/data/transactions/mint_transfer_noop/4URwUGBjbsF7UBUYdSC546tnBy7nD67txsso8D9CR9kGLtbbYh9NkGw15tEp16LLasmJX5VQR4Seh8gDjTrtdpoC b/integration_tests/src/data/transactions/mint_transfer_noop/4URwUGBjbsF7UBUYdSC546tnBy7nD67txsso8D9CR9kGLtbbYh9NkGw15tEp16LLasmJX5VQR4Seh8gDjTrtdpoC new file mode 100644 index 000000000..a9f81afe7 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_noop/4URwUGBjbsF7UBUYdSC546tnBy7nD67txsso8D9CR9kGLtbbYh9NkGw15tEp16LLasmJX5VQR4Seh8gDjTrtdpoC differ diff --git a/integration_tests/src/data/transactions/mint_transfer_noop/4nKDSvw2kGpccZWLEPnfdP7J1SEexQFRP3xWc9NBtQ1qQeGu3bu5WnAdpcLbjQ4iyX6BQ5QGF69wevE8ZeeY5poA b/integration_tests/src/data/transactions/mint_transfer_noop/4nKDSvw2kGpccZWLEPnfdP7J1SEexQFRP3xWc9NBtQ1qQeGu3bu5WnAdpcLbjQ4iyX6BQ5QGF69wevE8ZeeY5poA new file mode 100644 index 000000000..23b8337f4 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_noop/4nKDSvw2kGpccZWLEPnfdP7J1SEexQFRP3xWc9NBtQ1qQeGu3bu5WnAdpcLbjQ4iyX6BQ5QGF69wevE8ZeeY5poA differ diff --git 
a/integration_tests/src/data/transactions/mint_transfer_noop/5bNyZfmxLVP9cKc6GjvozExrSt4F1QFt4PP992pQwT8FFHdWsX3ZFNvwurfU2xpDYtQ7qAUxVahGCraXMevRH8p1 b/integration_tests/src/data/transactions/mint_transfer_noop/5bNyZfmxLVP9cKc6GjvozExrSt4F1QFt4PP992pQwT8FFHdWsX3ZFNvwurfU2xpDYtQ7qAUxVahGCraXMevRH8p1 new file mode 100644 index 000000000..d61d4ac70 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_noop/5bNyZfmxLVP9cKc6GjvozExrSt4F1QFt4PP992pQwT8FFHdWsX3ZFNvwurfU2xpDYtQ7qAUxVahGCraXMevRH8p1 differ diff --git a/integration_tests/src/data/transactions/mint_transfer_transfer/34xjcNf3rZFKz381hKpFLqxpojaDgXEpCqH5qcpTXLaJnDbtqRz35wiuMF1cAgvJGLzYYrwaMvCK1D7LxYsdpMU1 b/integration_tests/src/data/transactions/mint_transfer_transfer/34xjcNf3rZFKz381hKpFLqxpojaDgXEpCqH5qcpTXLaJnDbtqRz35wiuMF1cAgvJGLzYYrwaMvCK1D7LxYsdpMU1 new file mode 100644 index 000000000..40bcf40eb Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_transfer/34xjcNf3rZFKz381hKpFLqxpojaDgXEpCqH5qcpTXLaJnDbtqRz35wiuMF1cAgvJGLzYYrwaMvCK1D7LxYsdpMU1 differ diff --git a/integration_tests/src/data/transactions/mint_transfer_transfer/5VC3Jqr5X1N8NB8zuSahHpayekLVozYkDiPjJLqU6H5M6fq9ExVLGYYKKCPbeksMPXTjy65sdEQGPzDWAYPs8QjP b/integration_tests/src/data/transactions/mint_transfer_transfer/5VC3Jqr5X1N8NB8zuSahHpayekLVozYkDiPjJLqU6H5M6fq9ExVLGYYKKCPbeksMPXTjy65sdEQGPzDWAYPs8QjP new file mode 100644 index 000000000..e2866921f Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_transfer/5VC3Jqr5X1N8NB8zuSahHpayekLVozYkDiPjJLqU6H5M6fq9ExVLGYYKKCPbeksMPXTjy65sdEQGPzDWAYPs8QjP differ diff --git a/integration_tests/src/data/transactions/mint_transfer_transfer/5bq936UgGs4RnxM78iXp1PwVhr8sTYoEsHCWpr8QBFtc2YtS3ieYHcsPG46G2ikwrS3tXYnUK93PzseT52AR81RR b/integration_tests/src/data/transactions/mint_transfer_transfer/5bq936UgGs4RnxM78iXp1PwVhr8sTYoEsHCWpr8QBFtc2YtS3ieYHcsPG46G2ikwrS3tXYnUK93PzseT52AR81RR new file mode 100644 index 
000000000..c6d8e6d10 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_transfer_transfer/5bq936UgGs4RnxM78iXp1PwVhr8sTYoEsHCWpr8QBFtc2YtS3ieYHcsPG46G2ikwrS3tXYnUK93PzseT52AR81RR differ diff --git a/integration_tests/src/data/transactions/mint_verify_collection/5ZKjPxm3WAZzuqqkCDjgKpm9b5XjB9cuvv68JvXxWThvJaJxcMJgpSbYs4gDA9dGJyeLzsgNtnS6oubANF1KbBmt b/integration_tests/src/data/transactions/mint_verify_collection/5ZKjPxm3WAZzuqqkCDjgKpm9b5XjB9cuvv68JvXxWThvJaJxcMJgpSbYs4gDA9dGJyeLzsgNtnS6oubANF1KbBmt new file mode 100644 index 000000000..acc78c53a Binary files /dev/null and b/integration_tests/src/data/transactions/mint_verify_collection/5ZKjPxm3WAZzuqqkCDjgKpm9b5XjB9cuvv68JvXxWThvJaJxcMJgpSbYs4gDA9dGJyeLzsgNtnS6oubANF1KbBmt differ diff --git a/integration_tests/src/data/transactions/mint_verify_collection/63xhs5bXcuMR3uMACXWkkFMm7BJ9Thknh7WNMPzV8HJBNwpyxJTr98NrLFHnTZDHdSUFD42VFQx8rjSaGynWbaRs b/integration_tests/src/data/transactions/mint_verify_collection/63xhs5bXcuMR3uMACXWkkFMm7BJ9Thknh7WNMPzV8HJBNwpyxJTr98NrLFHnTZDHdSUFD42VFQx8rjSaGynWbaRs new file mode 100644 index 000000000..bd15ff8fd Binary files /dev/null and b/integration_tests/src/data/transactions/mint_verify_collection/63xhs5bXcuMR3uMACXWkkFMm7BJ9Thknh7WNMPzV8HJBNwpyxJTr98NrLFHnTZDHdSUFD42VFQx8rjSaGynWbaRs differ diff --git a/integration_tests/src/data/transactions/mint_verify_collection_unverify_collection/4hQQsDKgDx5PpZR7nGvxKsLSvX4J7voaiJC3ag7dPuu4HY5kbvaqD2gyeHbdja1f22ypmzouRNpuo6sbyGDSSgya b/integration_tests/src/data/transactions/mint_verify_collection_unverify_collection/4hQQsDKgDx5PpZR7nGvxKsLSvX4J7voaiJC3ag7dPuu4HY5kbvaqD2gyeHbdja1f22ypmzouRNpuo6sbyGDSSgya new file mode 100644 index 000000000..fbed702da Binary files /dev/null and b/integration_tests/src/data/transactions/mint_verify_collection_unverify_collection/4hQQsDKgDx5PpZR7nGvxKsLSvX4J7voaiJC3ag7dPuu4HY5kbvaqD2gyeHbdja1f22ypmzouRNpuo6sbyGDSSgya differ diff --git 
a/integration_tests/src/data/transactions/mint_verify_collection_unverify_collection/5k71fZRpRagY45ZYu13Q8C3Bmw6KFPBkRmbBx2NuYk7roVtvM8P16WouCZtnkhRCyKyQHSgHKyTY92t9aq2tyLdd b/integration_tests/src/data/transactions/mint_verify_collection_unverify_collection/5k71fZRpRagY45ZYu13Q8C3Bmw6KFPBkRmbBx2NuYk7roVtvM8P16WouCZtnkhRCyKyQHSgHKyTY92t9aq2tyLdd new file mode 100644 index 000000000..b0ec5f4a0 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_verify_collection_unverify_collection/5k71fZRpRagY45ZYu13Q8C3Bmw6KFPBkRmbBx2NuYk7roVtvM8P16WouCZtnkhRCyKyQHSgHKyTY92t9aq2tyLdd differ diff --git a/integration_tests/src/data/transactions/mint_verify_collection_unverify_collection/5uWXt8JAhuP2XQ2nYJTq8Ndp34fdG3vmJ7DJnb3bE6iyrZZ6jeuN9w5jZvKrduMDu4zKyQU7A3JtswhKxE3hjKBk b/integration_tests/src/data/transactions/mint_verify_collection_unverify_collection/5uWXt8JAhuP2XQ2nYJTq8Ndp34fdG3vmJ7DJnb3bE6iyrZZ6jeuN9w5jZvKrduMDu4zKyQU7A3JtswhKxE3hjKBk new file mode 100644 index 000000000..5fe09e943 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_verify_collection_unverify_collection/5uWXt8JAhuP2XQ2nYJTq8Ndp34fdG3vmJ7DJnb3bE6iyrZZ6jeuN9w5jZvKrduMDu4zKyQU7A3JtswhKxE3hjKBk differ diff --git a/integration_tests/src/data/transactions/mint_verify_creator/37ts5SqpNazPTp26VfC4oeuXpXezKYkD9oarczPNaE8TUGG8msifnTYTBJiBZNBeAUGrNw85EEfwnR1t9SieKTdq b/integration_tests/src/data/transactions/mint_verify_creator/37ts5SqpNazPTp26VfC4oeuXpXezKYkD9oarczPNaE8TUGG8msifnTYTBJiBZNBeAUGrNw85EEfwnR1t9SieKTdq new file mode 100644 index 000000000..f6aabfb2b Binary files /dev/null and b/integration_tests/src/data/transactions/mint_verify_creator/37ts5SqpNazPTp26VfC4oeuXpXezKYkD9oarczPNaE8TUGG8msifnTYTBJiBZNBeAUGrNw85EEfwnR1t9SieKTdq differ diff --git a/integration_tests/src/data/transactions/mint_verify_creator/4xrw5UwQSxxPzVxge6fbtmgLNsT2amaGrwpZFE95peRbnHGpxWtS2fF7whXW2xma4i2KDXdneztJZCAtgGZKTw11 
b/integration_tests/src/data/transactions/mint_verify_creator/4xrw5UwQSxxPzVxge6fbtmgLNsT2amaGrwpZFE95peRbnHGpxWtS2fF7whXW2xma4i2KDXdneztJZCAtgGZKTw11 new file mode 100644 index 000000000..83fecdc97 Binary files /dev/null and b/integration_tests/src/data/transactions/mint_verify_creator/4xrw5UwQSxxPzVxge6fbtmgLNsT2amaGrwpZFE95peRbnHGpxWtS2fF7whXW2xma4i2KDXdneztJZCAtgGZKTw11 differ diff --git a/integration_tests/src/general_scenario_tests.rs b/integration_tests/src/general_scenario_tests.rs new file mode 100644 index 000000000..70f0aa738 --- /dev/null +++ b/integration_tests/src/general_scenario_tests.rs @@ -0,0 +1,48 @@ +use crate::common::index_seed_events; +use crate::common::seed_nfts; +use crate::common::trim_test_name; +use crate::common::SeedEvent; +use crate::common::TestSetup; +use crate::common::TestSetupOptions; +use entities::api_req_params::GetAsset; +use function_name::named; +use itertools::Itertools; +use serial_test::serial; +use std::sync::Arc; +use tokio::sync::Mutex; +use tokio::task::JoinSet; + +#[tokio::test] +#[serial] +#[named] +async fn test_asset_parsing() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_nfts(["843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs new file mode 100644 index 000000000..7220ca96c --- /dev/null +++ b/integration_tests/src/lib.rs @@ -0,0 +1,6 @@ +mod 
account_update_tests; +mod cnft_tests; +mod common; +mod general_scenario_tests; +mod mpl_core_tests; +mod regular_nft_tests; diff --git a/integration_tests/src/mpl_core_tests.rs b/integration_tests/src/mpl_core_tests.rs new file mode 100644 index 000000000..73d2dda06 --- /dev/null +++ b/integration_tests/src/mpl_core_tests.rs @@ -0,0 +1,1005 @@ +use function_name::named; + +use entities::api_req_params::{ + GetAsset, GetAssetsByAuthority, GetAssetsByGroup, GetAssetsByOwner, +}; + +use itertools::Itertools; + +use serial_test::serial; +use std::sync::Arc; +use tokio::sync::Mutex; +use tokio::task::JoinSet; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_collection() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ" + } + "#; + + let mutexed_tasks = 
Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_assets_by_authority() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + // one is Core asset, one is Core collection + // both have same authority + let seeds: Vec = seed_accounts([ + "9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci", + "4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz", + ]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "authorityAddress": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "sortBy": { + "sortBy": "updated", + "sortDirection": "asc" + }, + "page": 1, + "limit": 50 + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAssetsByAuthority = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_assets_by_authority(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_assets_by_group() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts([ + "JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7", + "kTMCCKLTaZsnSReer12HsciwScUwhHyZyd9D9BwQF8k", + "EgzsppfYJmUet4ve8MnuHMyvSnj6R7LRmwsGEH5TuGhB", + "J2kazVRuZ33Po4PVyZGxiDYUMQ1eZiT5Xa13usRYo264", + ]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "groupKey": "collection", + 
"groupValue": "JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7", + "sortBy": { + "sortBy": "updated", + "sortDirection": "asc" + }, + "page": 1, + "limit": 50 + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAssetsByGroup = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_assets_by_group(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_assets_by_owner() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts([ + "4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb", + "9tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj", + ]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress": "7uScVQiT4vArB88dHrZoeVKWbtsRJmNp9r5Gce5VQpXS", + "sortBy": { + "sortBy": "updated", + "sortDirection": "asc" + }, + "page": 1, + "limit": 50 + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAssetsByOwner = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_assets_by_owner(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_edition() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["AejY8LGKAbQsrGZS1qgN4uFu99dJD3f8Js9Yrt7K3tCc"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "AejY8LGKAbQsrGZS1qgN4uFu99dJD3f8Js9Yrt7K3tCc" + } + "#; + + let 
mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_pubkey_in_rule_set() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["8H71x9Bhh9E9o3MZK4QnVC5MRFn1WZRf2Mc9w2wEbG5V"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "8H71x9Bhh9E9o3MZK4QnVC5MRFn1WZRf2Mc9w2wEbG5V" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_two_oracle_external_plugins() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["4aarnaiMVtGEp5nToRqBEUGtqY2F1gW2V8bBQe1rN5V9"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "4aarnaiMVtGEp5nToRqBEUGtqY2F1gW2V8bBQe1rN5V9" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn 
test_mpl_core_get_asset_with_oracle_external_plugin_on_collection() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["Hvdg2FjMEndC4jxF2MJgKCaj5omLLZ19LNfD4p9oXkpE"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "Hvdg2FjMEndC4jxF2MJgKCaj5omLLZ19LNfD4p9oXkpE" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_oracle_multiple_lifecycle_events() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["3puHPHUHFXxhS7qPQa5YYTngzPbetoWbu7y2UxxB6xrF"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "3puHPHUHFXxhS7qPQa5YYTngzPbetoWbu7y2UxxB6xrF" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = 
seed_accounts(["9v2H5sDBXKmYkGHebfaWwdgBWuMTBVWQom3QeEcV8oJj"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "9v2H5sDBXKmYkGHebfaWwdgBWuMTBVWQom3QeEcV8oJj" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_oracle_no_offset() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["2TZpUiBiyMdwLFTKRshVMHK8anQK2W8XXbfUfyxR8yvc"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "2TZpUiBiyMdwLFTKRshVMHK8anQK2W8XXbfUfyxR8yvc" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts([ + "Do7rVGmVNa9wjsKNyjoa5phqriLER6HCqUQm5zyoTX3f", + "CWJDcrzxSDE7FeNRzMK1aSia7qoaUPrrGQ81E7vkQpq4", + ]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "groupKey": "collection", + "groupValue": "Do7rVGmVNa9wjsKNyjoa5phqriLER6HCqUQm5zyoTX3f", + "sortBy": { + "sortBy": 
"updated", + "sortDirection": "asc" + }, + "page": 1, + "limit": 50 + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAssetsByGroup = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_assets_by_group(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_multiple_internal_and_external_plugins() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["Aw7KSaeRECbjLW7BYTUtMwGkaiAGhxrQxdLnpLYRnmbB"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "Aw7KSaeRECbjLW7BYTUtMwGkaiAGhxrQxdLnpLYRnmbB" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_autograph_plugin() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["Hz4MSHgevYkpwF3cerDLuPJLQE3GZ5yDWu7vqmQGpRMU"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "Hz4MSHgevYkpwF3cerDLuPJLQE3GZ5yDWu7vqmQGpRMU" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + 
insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_autograph_plugin_with_signature() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["4MCuZ5WNCgFnb7YJ2exj34qsLscmwd23WcoLBXBkaB7d"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "4MCuZ5WNCgFnb7YJ2exj34qsLscmwd23WcoLBXBkaB7d" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_verified_creators_plugin() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["AGyjcG9mBfYJFMZiJVkXr4iX7re6vkQ1Fw5grukA6Hiu"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "AGyjcG9mBfYJFMZiJVkXr4iX7re6vkQ1Fw5grukA6Hiu" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_verified_creators_plugin_with_signature() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let 
seeds: Vec = seed_accounts(["4iVX1oZj6nLAMerjXFw3UeGD4QU7BEaCscsWqD3zEH37"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "4iVX1oZj6nLAMerjXFw3UeGD4QU7BEaCscsWqD3zEH37" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["6tqX4RuPCoD9dVKEJ51jykwBwjKh6runcHJSuSHpDPJU"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "6tqX4RuPCoD9dVKEJ51jykwBwjKh6runcHJSuSHpDPJU" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority( +) { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["39XrhcVGuyq4HwTarxMCwDEMFtPBY5Nctxrvpvpdpe3g"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "39XrhcVGuyq4HwTarxMCwDEMFtPBY5Nctxrvpvpdpe3g" + } + "#; + + let mutexed_tasks = 
Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["2pY3t29uxpBotbmKbCsQNjYfML5DBoBshDgB7hpHu3XA"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "2pY3t29uxpBotbmKbCsQNjYfML5DBoBshDgB7hpHu3XA" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority( +) { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["41thppJ4z9HnBNbFMLnztXS7seqBptYV1jG8UhxR4vK8"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "41thppJ4z9HnBNbFMLnztXS7seqBptYV1jG8UhxR4vK8" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + 
+#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_data_section_with_binary_data() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["BVjK8uvqUuH5YU6ThX6A7gznx2xi8BxshawbuFe1Y5Vr"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "BVjK8uvqUuH5YU6ThX6A7gznx2xi8BxshawbuFe1Y5Vr" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority( +) { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["2aUn89GKuSjfYTeCH6GL1Y6CiUYqjvcgZehFGDJbhNeW"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "2aUn89GKuSjfYTeCH6GL1Y6CiUYqjvcgZehFGDJbhNeW" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_data_section_with_json_data() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) 
+ .await; + + let seeds: Vec = seed_accounts(["9vqNxe6M6t7PYo1gXrY18hVgDvCpouHSZ6vdDEFbybeA"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "9vqNxe6M6t7PYo1gXrY18hVgDvCpouHSZ6vdDEFbybeA" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority( +) { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["53q1PCBy5KgzZfoHu6bnLWQFVmJtKyceP8DqNMhXWUaA"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "53q1PCBy5KgzZfoHu6bnLWQFVmJtKyceP8DqNMhXWUaA" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_mpl_core_get_asset_with_data_section_with_msg_pack_data() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_accounts(["EuXEcqHhF9jPxV9CKB5hjHC2TRo3xprdgk5vJTc9qRaY"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "EuXEcqHhF9jPxV9CKB5hjHC2TRo3xprdgk5vJTc9qRaY" + } + "#; + + let mutexed_tasks = 
Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/src/regular_nft_tests.rs b/integration_tests/src/regular_nft_tests.rs new file mode 100644 index 000000000..3a473e929 --- /dev/null +++ b/integration_tests/src/regular_nft_tests.rs @@ -0,0 +1,196 @@ +use std::sync::Arc; + +use entities::api_req_params::{GetAsset, GetAssetBatch, GetAssetsByGroup, SearchAssets}; +use function_name::named; + +use itertools::Itertools; + +use serial_test::serial; +use tokio::{sync::Mutex, task::JoinSet}; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_reg_get_asset() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_nfts(["CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_reg_get_asset_batch() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_nfts([ + "HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK", + "2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd", + "5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1", + ]); + + 
index_seed_events(&setup, seeds.iter().collect_vec()).await; + + for (request, individual_test_name) in [ + ( + r#" + { + "ids": ["HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK", "2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd"] + } + "#, + "only-2", + ), + ( + r#" + { + "ids": ["2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd", "5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1"] + } + "#, + "only-2-different-2", + ), + ( + r#" + { + "ids": [ + "2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd", + "JECLQnbo2CCL8Ygn6vTFn7yeKn8qc7i51bAa9BCAJnWG", + "5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1" + ] + } + "#, + "2-and-a-missing-1", + ), + ] { + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAssetBatch = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_asset_batch(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(format!("{}-{}", name, individual_test_name), response); + } +} + +#[tokio::test] +#[serial] +#[named] +async fn test_reg_get_asset_by_group() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_nfts([ + "7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W", + "BioVudBTjJnuDW22q62XPhGP87sVwZKcQ46MPSNz4gqi", + "Fm9S3FL23z3ii3EBBv8ozqLninLvhWDYmcHcHaZy6nie", + ]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "groupKey": "collection", + "groupValue": "8Rt3Ayqth4DAiPnW9MDFi63TiQJHmohfTWLMQFHi4KZH", + "sortBy": { + "sortBy": "updated", + "sortDirection": "asc" + }, + "page": 1, + "limit": 1 + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAssetsByGroup = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .get_assets_by_group(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, 
response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_reg_search_assets() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: None, + clear_db: true, + }, + ) + .await; + + let seeds: Vec = seed_nfts([ + "2PfAwPb2hdgsf7xCKyU2kAWUGKnkxYZLfg5SMf4YP1h2", + "Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL", + ]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress": "6Cr66AabRYymhZgYQSfTCo6FVpH18wXrMZswAbcErpyX", + "page": 1, + "limit": 2 + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: SearchAssets = serde_json::from_str(request).unwrap(); + let response = setup + .das_api + .search_assets(request, mutexed_tasks.clone()) + .await + .unwrap(); + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap new file mode 100644 index 000000000..6663274fd --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap @@ -0,0 +1,81 @@ +--- +source: integration_tests/src/account_update_tests.rs +assertion_line: 232 +expression: response_new_slot +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://nftstorage.link/ipfs/bafybeihaky4htq6obur3ziksyanpdoqmrnefben3dbecddru52xh2sdkii/1232.json", + "files": [], + "metadata": { + "name": "Phantom Mage #1233", + "symbol": "PM", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "GMhP98kXvfziKNfq1nP99AbWtZAweLu3DFz5Vs5ZaVTy", + "scopes": [ + 
"full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0555, + "basis_points": 555, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5KtTmcxYYzK5mR7gEtjVX2eBBK1g7YjVbvsMBAbyf9uK", + "share": 0, + "verified": true + }, + { + "address": "HmkCMBPdY6MixHP8hxsM8G1UwJH3fBN2itWagnYoXASR", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "BzbdvwEkQKeghTY53aZxTYjUienhdbkNVkgrLV6cErke" + }, + "supply": null, + "mutable": false, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 18446744073709551615, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh", + "freeze_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh" + } +} diff --git a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap new file mode 100644 index 000000000..3f93cdf89 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap @@ -0,0 +1,81 @@ +--- +source: integration_tests/src/account_update_tests.rs +assertion_line: 212 +expression: response_new_slot +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": 
"https://nftstorage.link/ipfs/bafybeihaky4htq6obur3ziksyanpdoqmrnefben3dbecddru52xh2sdkii/1232.json", + "files": [], + "metadata": { + "name": "Phantom Mage #1233", + "symbol": "PM", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "GMhP98kXvfziKNfq1nP99AbWtZAweLu3DFz5Vs5ZaVTy", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0555, + "basis_points": 555, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "5KtTmcxYYzK5mR7gEtjVX2eBBK1g7YjVbvsMBAbyf9uK", + "share": 0, + "verified": true + }, + { + "address": "HmkCMBPdY6MixHP8hxsM8G1UwJH3fBN2itWagnYoXASR", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": true, + "delegate": "1111111ogCyDbaRMvkdsHB3qfdyFYaG1WtRUAfdh", + "ownership_model": "single", + "owner": "1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 18446744073709551615, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh", + "freeze_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh" + } +} diff --git a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap new file mode 100644 index 000000000..3ca111f6b --- /dev/null +++ 
b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap @@ -0,0 +1,81 @@ +--- +source: integration_tests/src/account_update_tests.rs +assertion_line: 266 +expression: "setup.das_api.get_asset(request.clone(), mutexed_tasks.clone()).await.unwrap()" +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://nftstorage.link/ipfs/bafybeihaky4htq6obur3ziksyanpdoqmrnefben3dbecddru52xh2sdkii/1232.json", + "files": [], + "metadata": { + "name": "Phantom Mage #1233", + "symbol": "PM", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "GMhP98kXvfziKNfq1nP99AbWtZAweLu3DFz5Vs5ZaVTy", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0555, + "basis_points": 555, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5KtTmcxYYzK5mR7gEtjVX2eBBK1g7YjVbvsMBAbyf9uK", + "share": 0, + "verified": true + }, + { + "address": "HmkCMBPdY6MixHP8hxsM8G1UwJH3fBN2itWagnYoXASR", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": true, + "delegate": "1111111ogCyDbaRMvkdsHB3qfdyFYaG1WtRUAfdh", + "ownership_model": "single", + "owner": "1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM" + }, + "supply": null, + "mutable": false, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 18446744073709551615, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + 
"mint_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh", + "freeze_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh" + } +} diff --git a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates.snap b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates.snap new file mode 100644 index 000000000..be85df535 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates.snap @@ -0,0 +1,81 @@ +--- +source: integration_tests/src/account_update_tests.rs +assertion_line: 184 +expression: response +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://nftstorage.link/ipfs/bafybeihaky4htq6obur3ziksyanpdoqmrnefben3dbecddru52xh2sdkii/1232.json", + "files": [], + "metadata": { + "name": "Phantom Mage #1233", + "symbol": "PM", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "GMhP98kXvfziKNfq1nP99AbWtZAweLu3DFz5Vs5ZaVTy", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0555, + "basis_points": 555, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "5KtTmcxYYzK5mR7gEtjVX2eBBK1g7YjVbvsMBAbyf9uK", + "share": 0, + "verified": true + }, + { + "address": "HmkCMBPdY6MixHP8hxsM8G1UwJH3fBN2itWagnYoXASR", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "BzbdvwEkQKeghTY53aZxTYjUienhdbkNVkgrLV6cErke" + }, + "supply": null, + "mutable": true, + 
"burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 18446744073709551615, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh", + "freeze_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh" + } +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__asset_decompress.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__asset_decompress.snap new file mode 100644 index 000000000..ca04465a0 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__asset_decompress.snap @@ -0,0 +1,96 @@ +--- +source: integration_tests/src/cnft_tests.rs +assertion_line: 41 +expression: response +snapshot_kind: text +--- +{ + "interface": "V1_NFT", + "id": "Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://baze.one/posts/rMb9dU7X2lSu/m", + "files": [], + "metadata": { + "name": "B-BCKSNW-4", + "symbol": "BCKSNW", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "5S5CHHQ69Q2gdRVRV45TVnZpy6eExzq5Zw5BRQozC3Ap", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "2MJeqBXcRmy3PXvQP5RRPqeig2CTfnXv5nGT3wRD1xW5", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.1, + "basis_points": 1000, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "8dpwoPxMkCd9LnsDEbrRuBL5sw3mqcWLf9vPcwgpeRMS", + "share": 100, + "verified": false + }, + { + "address": 
"2kKisVa2VmNk6iA2avQpZdDXtDGBWgzEE9ib7iWmKemw", + "share": 0, + "verified": false + }, + { + "address": "94huX4Ya45pA41AfeBkqadbBFjgFDrv5riZQXVLqsoVT", + "share": 0, + "verified": true + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "HkdmoXct4zKyAfgZgpuavKzmuvf2Z5UZ35v1UUW3Z9Jk" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 253 + }, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "Efi1oabSQtJ2uNC83d6yjKW4Z94FuxoX9nYmadJFSRSe", + "freeze_authority": "Efi1oabSQtJ2uNC83d6yjKW4Z94FuxoX9nYmadJFSRSe" + } +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_owners_table-base.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_owners_table-base.snap new file mode 100644 index 000000000..5f3728cc4 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_owners_table-base.snap @@ -0,0 +1,92 @@ +--- +source: integration_tests/src/cnft_tests.rs +assertion_line: 180 +expression: response +snapshot_kind: text +--- +{ + "total": 1, + "limit": 5, + "page": 1, + "items": [ + { + "interface": "V1_NFT", + "id": "4k41XV45LUyX2iRSdw8bvwPmKF9y7SNLi9imUGWc3ADS", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://arweave.net/rtwIkSB6YhNjaUjnSxUiiDV6BwJ_W2pLw8Mfhj6lvBo", + "files": [], + "metadata": { + "name": "Rolling Canvas #1 | Sketch Paper", + "symbol": "", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "EZ2pnS9heCAV7V6evtWeucEKdUaB6Y4Vca4W6ZuBJN81", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + 
"compressed": true, + "data_hash": "APHE858V4tkWqVATytfgTupRJBeg8aQfwCEvssd9kZVc", + "creator_hash": "8KdDUqVPXcyvBch2atwAxEyf8Ym3TfB5khSkZRSgoYxR", + "asset_hash": "3TP6JZ2e3CPzoktSC1cx5K8GM2dWrS6F8xuM51m78Tqr", + "tree": "8Uyk1cZGDcytRBdyKkqHJhTAvSPzSNDDEJTacWUEW8p6", + "seq": 258421, + "leaf_id": 170433 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "STKM3PzeFc6kdJiMXyQ3pRDa3ERCEGgM9NbPPGb2Rft", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.065, + "basis_points": 650, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "STK8wk9npcTtCLHEeLc3MkfqvTQ3Pai37qbZBxuKV2Z", + "share": 0, + "verified": true + }, + { + "address": "WoMbXFtdfH8crq2Zi7bQhfGx2Gv8EN4saP13gcdUGog", + "share": 23, + "verified": false + }, + { + "address": "6sYxGBdpzfABtHRrHERFvS92y9yv4Sxwo8EFCvjU2vv7", + "share": 77, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "F3MdnVQkRSy56FSKroYawfMk1RJFo42Quzz8VTmFzPVz" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": null + }, + "mutable": true, + "burnt": false + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_owners_table-with_different_owner.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_owners_table-with_different_owner.snap new file mode 100644 index 000000000..2508f8d05 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_owners_table-with_different_owner.snap @@ -0,0 +1,10 @@ +--- +source: integration_tests/tests/integration_tests/cnft_tests.rs +expression: response +--- +{ + "total": 0, + "limit": 5, + "page": 1, + "items": [] +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_scenario_mint_update_metadata.snap 
b/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_scenario_mint_update_metadata.snap new file mode 100644 index 000000000..7247be262 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_scenario_mint_update_metadata.snap @@ -0,0 +1,72 @@ +--- +source: integration_tests/tests/integration_tests/cnft_tests.rs +expression: response +--- +{ + "interface": "V1_NFT", + "id": "FLFoCw2RBbxiw9rbEeqPWJ5rasArD9kTCKWEJirTexsU", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "9K8HEU5zynrCyL9CyuvCX9qUU58avinJsu5E59gyRnB8", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "3TV3EEhP7QrfHJ1GnsuqVo3mTbPsLMgtmqV3tLnh3HQq", + "creator_hash": "2gYtfk9rgoFMgepLuQkMiPAmXnmG7kFh9VgMDPPcHMjY", + "asset_hash": "4Un9hicHokmeRx3mcqzuVd1k1zAzjqXymm7xo8xqJTeK", + "tree": "EzppnpWWMKg4egUkfe86aQQyYabFL9jPPWTrZjniDHxv", + "seq": 2, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc", + "share": 50, + "verified": true + }, + { + "address": "3Le8mq2Y7kpAwAKmTRyr7n6vcM9hdDuxmiWA3tUYi4Nw", + "share": 50, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 0 + }, + "mutable": true, + "burnt": false +} diff --git 
a/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_scenario_mint_update_metadata_remove_creators.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_scenario_mint_update_metadata_remove_creators.snap new file mode 100644 index 000000000..713adb096 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__cnft_scenario_mint_update_metadata_remove_creators.snap @@ -0,0 +1,61 @@ +--- +source: integration_tests/tests/integration_tests/cnft_tests.rs +expression: response +--- +{ + "interface": "V1_NFT", + "id": "Gi4fAXJdnWYrEPjQm3wnW9ctgG7zJjB67zHDQtRGRWyZ", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2GuRg5JGumFcgKmZvp7fc5be9UsaNihP42WPEziZ9mEA", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "FsRoJdPqF4xJczL35QG1aQQG7g7DBZK1VF9Qs8zexssc", + "creator_hash": "EKDHSGbrGztomDfuiV4iqiZ6LschDJPsFiXjZ83f92Md", + "asset_hash": "CbsvyFdcQ8SyGaVpqNE5zjLfSbgnuKDnJZEyQgC4P5Wt", + "tree": "8PkWaY1BYd6ysBZt3JEwuAXjNrZE6omW18cKEJzbyaw4", + "seq": 2, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 0 + }, + "mutable": true, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_delegate_transfer.snap 
b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_delegate_transfer.snap new file mode 100644 index 000000000..db4ea6f15 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_delegate_transfer.snap @@ -0,0 +1,72 @@ +--- +source: integration_tests/tests/integration_tests/cnft_tests.rs +expression: response +--- +{ + "interface": "V1_NFT", + "id": "77wWrvhgEkkQZQVA2hoka1JTsjG3w7BVzvcmqxDrVPWE", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "E9uEW5uMNDhLGracp5XtvarF3udV3p6hemoXfBDB82QF", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "F5iDDHxd2DVZa5eZCqE2a91QLadea4ygJwM18UUut6dj", + "creator_hash": "EF57j46BT5Cynwija675rq59iDN1ZapYsJDnMHqta463", + "asset_hash": "BvK9Z52tdghAc8ohBdSbeJfbfHgirzJEKkaNdexBHHQ9", + "tree": "2AsVtpcSDi4nUhMEvtZPApGvucu6kP4qrKV6SZ9hG3WT", + "seq": 3, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc", + "share": 55, + "verified": false + }, + { + "address": "GLUJH72y4ZoBU8qqHrQnDBtFsocF9DpeQNM1NS1q87rU", + "share": 45, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "DjpoGceP1EJa2qEaZq8kS1GHTeDDL3AXJcSwLtrwqNj3" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 0 + }, + "mutable": false, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_no_json_uri.snap 
b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_no_json_uri.snap new file mode 100644 index 000000000..953926347 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_no_json_uri.snap @@ -0,0 +1,75 @@ +--- +source: integration_tests/src/cnft_tests.rs +assertion_line: 41 +expression: response +snapshot_kind: text +--- +{ + "interface": "V1_NFT", + "id": "DFRJ4PwAze1mMQccRmdyc46yQpEVd4FPiwtAVgzGCs7g", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "", + "files": [], + "metadata": { + "name": "", + "symbol": "", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "GMdA1G1CDKNaiE1xGNJnB2KyNJVLCzvEcZ6P62SYSL3Z", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "6V4Uaz1QF1cnwAPGegnAZXd1oHcG3vydcXPDz2Rzpnzi", + "creator_hash": "AVgJYLS3Mex1pAkfXikHzfWF4Pvf924ESXswRTXRVMik", + "asset_hash": "HKvUDBDmeFo727WTYiE9SdR7bqpkQeJecZQoJbUep9Pg", + "tree": "67B3xvadpVeBJ6MT5tQqBqntrBeLNjrWbcF6uEAH1EEi", + "seq": 4, + "leaf_id": 3 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "GG6KuiAjG4hhg2AYdtjSoCG1QesgRFjTaok5MLxmAF3k", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "7WS6MiWdcqN7woEu8Vw8vGTmwxudPqQFts84jkvsDx9W", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "347kSWBNPm6Z3VFZZXAcKnT4T7v4WVQYw5b2xPJxp6zA" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": null + }, + "mutable": true, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_redeem.snap 
b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_redeem.snap new file mode 100644 index 000000000..d0e777d7c --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_redeem.snap @@ -0,0 +1,85 @@ +--- +source: integration_tests/src/cnft_tests.rs +assertion_line: 41 +expression: response +snapshot_kind: text +--- +{ + "interface": "V1_NFT", + "id": "Az9QTysJj1LW1F7zkYF21HgBj3FRpq3zpxTFdPnAJYm8", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://baze.one/posts/rMb9dU7X2lSu/m", + "files": [], + "metadata": { + "name": "B-BCKSNW-4", + "symbol": "BCKSNW", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "6uqVdTq4MSmr6FKdy5CCFnCzNNLZTw8ro7TdxC9UeKNr", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "11111111111111111111111111111111", + "creator_hash": "11111111111111111111111111111111", + "asset_hash": "11111111111111111111111111111111", + "tree": "tree9kmh23Qwa9K8sZ9rQtYshSwKA85CTEvw5bvTrau", + "seq": 62, + "leaf_id": 58 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "2MJeqBXcRmy3PXvQP5RRPqeig2CTfnXv5nGT3wRD1xW5", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.1, + "basis_points": 1000, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "8dpwoPxMkCd9LnsDEbrRuBL5sw3mqcWLf9vPcwgpeRMS", + "share": 100, + "verified": false + }, + { + "address": "2kKisVa2VmNk6iA2avQpZdDXtDGBWgzEE9ib7iWmKemw", + "share": 0, + "verified": false + }, + { + "address": "94huX4Ya45pA41AfeBkqadbBFjgFDrv5riZQXVLqsoVT", + "share": 0, + "verified": true + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "HkdmoXct4zKyAfgZgpuavKzmuvf2Z5UZ35v1UUW3Z9Jk" + }, + "supply": { + "print_max_supply": 0, + 
"print_current_supply": 0, + "edition_nonce": null + }, + "mutable": true, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_redeem_cancel_redeem.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_redeem_cancel_redeem.snap new file mode 100644 index 000000000..10beb8031 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_redeem_cancel_redeem.snap @@ -0,0 +1,72 @@ +--- +source: integration_tests/tests/integration_tests/cnft_tests.rs +expression: response +--- +{ + "interface": "V1_NFT", + "id": "5WaPA7HLZKGg56bcKiroMXAzHmB1mdxK3QTeCDepLkiK", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "JBLMxYC5JMW9v2yGuSM2xtFqsD7omw2gQS2YmcXPz3dL", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "DLrNQJ6gJbfsqBLe3QXMVc8j2kjVtFqFbXd6rFsoFVyG", + "creator_hash": "58PDE6bzJweEP4NauMXugPYfjkZMzGGptDyH7rP4ZSEq", + "asset_hash": "AqMe3832WCXHbaaKAPs5MFo55oWBEKKN5iigTaoRtHFr", + "tree": "Hk1644vHxBJ33ZZ1QW5P1zpex9qJ22RdHCwk46aaR16k", + "seq": 3, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc", + "share": 55, + "verified": false + }, + { + "address": "GXxPiX5aAQJPSnNoJ4WwwNwbjmUEPdeCiSQoDgXkg2X7", + "share": 45, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc" + }, + "supply": { + "print_max_supply": 0, + 
"print_current_supply": 0, + "edition_nonce": 0 + }, + "mutable": false, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_to_collection_unverify_collection.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_to_collection_unverify_collection.snap new file mode 100644 index 000000000..851bdc01a --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_to_collection_unverify_collection.snap @@ -0,0 +1,80 @@ +--- +source: integration_tests/src/cnft_tests.rs +assertion_line: 41 +expression: response +snapshot_kind: text +--- +{ + "interface": "V1_NFT", + "id": "2gEbvG3Cb6JRaGWAx5e85Bf5z4u37EURBeyPBqXDzZoY", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "AR6is91KeUbiYrSCrHAQfBk3J2jApxefAiWMm55nD9o5", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "8M8DpVvA1aGNMV6xMViDyEF9oS89VKqbWsxBfUNkGGQ9", + "creator_hash": "4q4TdVGLUpKPvHqZT57xM66gZyAjViQdPPsZKoHPRZni", + "asset_hash": "2MYeWSm5mv3FXUyTDeD247uCfEMzpDRaU2ZYMMhRDjga", + "tree": "ChxiXfVhkP7LYminiaFJRLQmRigRmLPw3iduEAvjpz3L", + "seq": 2, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "DdyrHTfN4wskbqTm3NDbAvRDsqiewzkuMboMRYJmNG6A", + "verified": false + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc", + "share": 55, + "verified": false + }, + { + "address": "CxPipfKpggvxG5ne5SD23c2JFz3TMNqrN1Vjr1vj8NFK", + "share": 45, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": 
false, + "delegate": null, + "ownership_model": "single", + "owner": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 0 + }, + "mutable": false, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_burn.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_burn.snap new file mode 100644 index 000000000..1b3a83390 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_burn.snap @@ -0,0 +1,72 @@ +--- +source: integration_tests/tests/integration_tests/cnft_tests.rs +expression: response +--- +{ + "interface": "V1_NFT", + "id": "8vw7tdLGE3FBjaetsJrZAarwsbc8UESsegiLyvWXxs5A", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "9nyYr9vyDZU8SnVNgQ58kcEtcpXUj57vkzwd4UjbZrS8", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "GFsq2BCwZdQaErGkQD5my36tpC8Cg38HFuMvbM6ShC2h", + "creator_hash": "2ct1dcDUbFDHfpdtuZ5vdkPB1ewjFg9pxRi58Yj4huCj", + "asset_hash": "4R8dTjLuyetziKZFeM45Qa1XuGn3vd4LdJkGPEJhotAq", + "tree": "CnBt2TJrw1dXUBfVBf2Ah4Cz5s84jhBG7cejdPF19Eyh", + "seq": 3, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc", + "share": 55, + "verified": false + }, + { + "address": "CXiFtmdyYxeX5hydprYLaMjH4h2r8NBQ8NTezzfemGv5", + "share": 45, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + 
"ownership_model": "single", + "owner": "BJ5jYD1AGSTR7fvZyvUoj59PY7cF8Ki2j1gB36PJFuTD" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 0 + }, + "mutable": false, + "burnt": true +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_mpl_programs.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_mpl_programs.snap new file mode 100644 index 000000000..fa1dcd0d2 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_mpl_programs.snap @@ -0,0 +1,62 @@ +--- +source: integration_tests/tests/integration_tests/cnft_tests.rs +assertion_line: 36 +expression: response +--- +{ + "interface": "V1_NFT", + "id": "ZzTjJVwo66cRyBB5zNWNhUWDdPB6TqzyXDcwjUnpSJC", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/my-nft.json", + "files": [], + "metadata": { + "name": "My NFT", + "symbol": "", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "6aGkAY47sgJopPFYYsQaTrsQvV2gAqZzZgyVVT12VzwF", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "HB6sKWxroCdwkChjxckW3CF3fWupZHhPEua62GF46Ljs", + "creator_hash": "EKDHSGbrGztomDfuiV4iqiZ6LschDJPsFiXjZ83f92Md", + "asset_hash": "GrLckNqQqJK6qKuNP6vhhhnfHCTXpoBXVjDg7JhJ4wF1", + "tree": "DBMSBx9wYU5WTbYYq9shqK7pffR1EhbzjiHAGd7vAp7Z", + "seq": 2, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "CrPXWqKpoHUDKSPyr6S4hy2wjFC1Nm8NmKVow4zYRDXf" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": null + }, + 
"mutable": true, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_noop.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_noop.snap new file mode 100644 index 000000000..c2083dfac --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_noop.snap @@ -0,0 +1,75 @@ +--- +source: integration_tests/src/cnft_tests.rs +assertion_line: 41 +expression: response +snapshot_kind: text +--- +{ + "interface": "V1_NFT", + "id": "7myVr8fEG52mZ3jAwgz88iQRWsuzuVR2nfH8n2AXnBxE", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://arweave.net/S40vvBVuCvZhAWI3kvk3QreUqVAvR0AaUDObOhB8WIY", + "files": [], + "metadata": { + "name": "Golden Azurite", + "symbol": "OEAs", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "EDAR6p4AUbv9SpD1pDm3gxdSAivdqsHxsf6V9pBc532U", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "B2tu4duCUPequnXh7DxbMnLeLcACHbDCQn3g34s5Cvbx", + "creator_hash": "6UiSCAv4r66MALaqhNE7qdTK84qKk1yJqR4UYtT8qEQ1", + "asset_hash": "45gbVkkwEaKhFatRQf1GPbkJVdhtVwefEzdCi9aNpHG8", + "tree": "4r2zZHZvC4Se1KUcCcyCM4ZoFQNGZm2M5FMmUypFocAP", + "seq": 43, + "leaf_id": 5 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "BwwjnxTHeVWdFieDWmoezta19q1NiwcNNyoon9S38bkM", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "4gETqgEwFLkXX9yk6qBszA6LMjC2kRyyERXsAr2rwhwf", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "4fzLUnMx7CrmzVqgySNFkup1ACgZNnBWLx9sfpsaLEcK" + }, + "supply": { + "print_max_supply": 0, + 
"print_current_supply": 0, + "edition_nonce": null + }, + "mutable": true, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_transfer.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_transfer.snap new file mode 100644 index 000000000..28a0ca830 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_transfer_transfer.snap @@ -0,0 +1,72 @@ +--- +source: integration_tests/tests/integration_tests/cnft_tests.rs +expression: response +--- +{ + "interface": "V1_NFT", + "id": "EcLv3bbLYr2iH5PVEuf9pJMRdDCvCqwSx3Srz6AeKjAe", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "J6uMQYzyeFXE5tvnKtmAUyHeiKEWiELARceY8HhSuBF6", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "68ZcMZF52Bv3mayj4AjPFdVoMhrE9Ap18CQRuUhB2pWu", + "creator_hash": "2RsQnw2DXTrKWT2GAd7LEnvgHuL8gZ8C2wk89dcW31yb", + "asset_hash": "5gZXmV7yzWMcKk2n24t4tXECHJffRX179ZoxVaheADpp", + "tree": "AdKerrR54h9H1zNoyex9f6svuQQSeKrSEufiGtA5zu8v", + "seq": 3, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc", + "share": 55, + "verified": false + }, + { + "address": "AhKyxn99oZN7MhdWfU8S9qmmBZBkQYgr2KaNQEJWVHcd", + "share": 45, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "8WUbLiKgdqA2jcA8xR2Qm1nxeQTHSiz2HTyZDb7nZu6n" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + 
"edition_nonce": 0 + }, + "mutable": false, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_verify_collection.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_verify_collection.snap new file mode 100644 index 000000000..9897cd283 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_verify_collection.snap @@ -0,0 +1,80 @@ +--- +source: integration_tests/src/cnft_tests.rs +assertion_line: 41 +expression: response +snapshot_kind: text +--- +{ + "interface": "V1_NFT", + "id": "2WjoMU1hBGXv8sKcxQDGnu1tgMduzdZEmEEGjh8MZYfC", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2322PBMYe82eqDMmzJzUdWYWjhDzr1qGaujs32sNnHp1", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "5qnPtMuhWzj8rRviKV1YRitUxgt4iFqFkpvyuBn5rTKW", + "creator_hash": "FM4zS99NvwRZm5UGgcJW1pQEWDtpgH4jaiCaTkMn6mxD", + "asset_hash": "DaxPr3AmJoqwJxgeeTSXZjM8eXzj2aEkbAWChJEhh53e", + "tree": "F7gUS2b9mMKHWz6yebb447tAt9wUZbwtwqFabJbhi4V2", + "seq": 2, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "4zapNXifB7Lz5XGUtsYQ3gsEujK2dqFw4mE9NY57NrtD", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc", + "share": 55, + "verified": false + }, + { + "address": "8xJtnbXezwkUwvj5tMUUK7ortAPa3Sm5sJPvTUhXhKR5", + "share": 45, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": 
"Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 0 + }, + "mutable": false, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_verify_collection_unverify_collection.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_verify_collection_unverify_collection.snap new file mode 100644 index 000000000..066eb392b --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_verify_collection_unverify_collection.snap @@ -0,0 +1,80 @@ +--- +source: integration_tests/src/cnft_tests.rs +assertion_line: 41 +expression: response +snapshot_kind: text +--- +{ + "interface": "V1_NFT", + "id": "BiHHJ1gKV4exTjPe7PE6aydgMVqRUzzz8aeWYCGhZJ4s", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "FB3wCJHTxrKFrzHXfkf2wZPYFNioKhmSvEbackvuCfJZ", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "8wZ2jDVA3gdirM6QtZrQHBcuJsLM4TZusEAKEEfSvgKw", + "creator_hash": "A8KEBkNKGsouiKAsE5ARHqNHGTb7ZzPi6LHesaZCL4Aw", + "asset_hash": "BpyVeWUUM7jaT77R2o7eZ19aQMq6ihtNY7GctWt6yXCZ", + "tree": "7BgR46hR4ZEERegqpUyA7H11d7k43skjs9nLNX6vVUQW", + "seq": 3, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "FhBQpV7TdZHZe6rrS9hVhkmBZbjE6RDkD5QJ8XgcYHYX", + "verified": false + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc", + "share": 55, + "verified": false + }, + { + "address": 
"3aUG6gNsFmtvEMhHf1D58f6eJavABBiPHdHJvY7odGKu", + "share": 45, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 0 + }, + "mutable": false, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_verify_creator.snap b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_verify_creator.snap new file mode 100644 index 000000000..a579b883f --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__cnft_tests__mint_verify_creator.snap @@ -0,0 +1,72 @@ +--- +source: integration_tests/tests/integration_tests/cnft_tests.rs +expression: response +--- +{ + "interface": "V1_NFT", + "id": "5rmTyghEuZhRTB77L3KqGMy6h5RpSNWNLj14avbxGNKB", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://metaplex.com", + "files": [], + "metadata": { + "name": "test", + "symbol": "TST", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "CytP31N7oz9etRF9ddgav6JzEhcWuZXTSTwh3jP7zRa", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "31vt7MSZzRykZN1FjQktuk8oaWCVL9SzfMyX9Q2RtTaa", + "creator_hash": "4GAv4MMcgC7xjLuLHSMxxoRpp2cCbz52f9vhYK7GNZMX", + "asset_hash": "6mfR173y5dNp19LzqQXruUWrbPbFFC9TGbqUN1ArNsBm", + "tree": "6XLLEYd2AtUFAP2ysaTGoUj3QmzPumqH1MX2n7DzFrBU", + "seq": 2, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc", + "share": 55, + "verified": false + }, + { + "address": 
"rU9dhXotNmpzzBHmQZKAGHGSvcLN6VB2j4TC5rTHPz3", + "share": 45, + "verified": true + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "Fq4HDXfutKjEZ7zZP2JmKboSm2ZsYsKEJ7BLQAfrpNcc" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 0 + }, + "mutable": false, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap b/integration_tests/src/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap new file mode 100644 index 000000000..5cd4f3e8f --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap @@ -0,0 +1,81 @@ +--- +source: integration_tests/src/general_scenario_tests.rs +assertion_line: 37 +expression: response +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "843gdpsTE4DoJz3ZoBsEjAqT8UgAcyF5YojygGgGZE1f", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://nftstorage.link/ipfs/bafybeihaky4htq6obur3ziksyanpdoqmrnefben3dbecddru52xh2sdkii/1232.json", + "files": [], + "metadata": { + "name": "Phantom Mage #1233", + "symbol": "PM", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "GMhP98kXvfziKNfq1nP99AbWtZAweLu3DFz5Vs5ZaVTy", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0555, + "basis_points": 555, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "5KtTmcxYYzK5mR7gEtjVX2eBBK1g7YjVbvsMBAbyf9uK", + "share": 0, + "verified": true + }, + { + "address": "HmkCMBPdY6MixHP8hxsM8G1UwJH3fBN2itWagnYoXASR", + 
"share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "BzbdvwEkQKeghTY53aZxTYjUienhdbkNVkgrLV6cErke" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh", + "freeze_authority": "BBxbujpLmQgqwu9p1t2S2AZrXuTGBiaUg3M7PUjuBExh" + } +} diff --git a/integration_tests/src/snapshots/integration_tests__general_scenario_tests__creators_reordering.snap b/integration_tests/src/snapshots/integration_tests__general_scenario_tests__creators_reordering.snap new file mode 100644 index 000000000..2ed4bf732 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__general_scenario_tests__creators_reordering.snap @@ -0,0 +1,83 @@ +--- +source: integration_tests/tests/integration_tests/general_scenario_tests.rs +expression: response +--- +{ + "interface": "ProgrammableNFT", + "id": "ANt9HygtvFmFJ1UcAHFLnM62JJWjk8fujMzjGfpKBfzk", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://shdw-drive.genesysgo.net/3sRtma7VsvgUrGroHkfQga8ipBmv4PAySFwS6rayXxbU/761158362554895.json", + "files": [], + "metadata": { + "name": "BVDCAT #5129", + "symbol": "BVD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "4xJDyBw7RHgfzG2inyTHs8FP7wyqFSywcLKwX8GK6X55", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "HEAKpy99JuLhfinuLgji757JxHvPizBo7WaXvWBYc3kz" 
+ } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "8Jhy62JeG4rgPu4Q2tn3Q3eZ8XUZmHhYDKpVJkQ8RFhe", + "share": 0, + "verified": true + }, + { + "address": "9sJ3GKyTpBaNJ9CVFV6DecV556G1jU9L32kJASxzWsQA", + "share": 10, + "verified": false + }, + { + "address": "BDaobvsTU8Eu3R4sx1vLufKiToaZL3MDTHxPgHgvGWC7", + "share": 10, + "verified": false + }, + { + "address": "F9xfmpggwgqH7ASZzNre8TxZztZwCogPBE8aQCNBLkBn", + "share": 80, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "AZgXpkRSetUJHy6C3NBvG6jNe49MpgrkZ2RkdMkjCjkW" + }, + "supply": null, + "mutable": true, + "burnt": false +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin.snap new file mode 100644 index 000000000..c519a5166 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin.snap @@ -0,0 +1,78 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 527 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "Hz4MSHgevYkpwF3cerDLuPJLQE3GZ5yDWu7vqmQGpRMU", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": 
"creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3386160, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": { + "autograph": { + "authority": { + "type": "Owner", + "address": null + }, + "index": 0, + "offset": 119, + "data": { + "signatures": [] + } + } + }, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin_with_signature.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin_with_signature.snap new file mode 100644 index 000000000..52fbe25d0 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin_with_signature.snap @@ -0,0 +1,83 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 558 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "4MCuZ5WNCgFnb7YJ2exj34qsLscmwd23WcoLBXBkaB7d", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + 
"primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "ACxrDWeCPic6voU6a8vyadpL8nSW15Un5vT76LDpxD4N" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3650640, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": { + "autograph": { + "authority": { + "type": "Owner", + "address": null + }, + "index": 0, + "offset": 119, + "data": { + "signatures": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "message": "hi" + } + ] + } + } + }, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset.snap new file mode 100644 index 000000000..658b71b32 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset.snap @@ -0,0 +1,117 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 47 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0005, + 
"basis_points": 5, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": false, + "delegated": true, + "delegate": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 4019520, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": { + "royalties": { + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "index": 0, + "offset": 119, + "data": { + "creators": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "percentage": 100 + } + ], + "basis_points": 5, + "rule_set": "None" + } + }, + "freeze_delegate": { + "authority": { + "type": "Address", + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "index": 1, + "offset": 160, + "data": { + "frozen": false + } + }, + "transfer_delegate": { + "authority": { + "type": "Owner", + "address": null + }, + "index": 2, + "offset": 162, + "data": {} + } + }, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority.snap new file mode 100644 index 000000000..3a2f7e7fa --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority.snap @@ -0,0 +1,86 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 651 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": 
"6tqX4RuPCoD9dVKEJ51jykwBwjKh6runcHJSuSHpDPJU", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3594960, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "data_len": 13, + "index": 0, + "offset": 119, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "type": "AppData", + "adapter_config": { + "data_authority": { + "type": "Owner", + "address": null + }, + "schema": "Binary" + }, + "data_offset": 122, + "data": "SGVsbG8sIHdvcmxkIQ==" + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority.snap new file mode 100644 index 000000000..f2c106a06 --- /dev/null +++ 
b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority.snap @@ -0,0 +1,89 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 683 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "39XrhcVGuyq4HwTarxMCwDEMFtPBY5Nctxrvpvpdpe3g", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3755040, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "data_len": 36, + "index": 0, + "offset": 119, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "type": "AppData", + "adapter_config": { + "data_authority": { + "type": "UpdateAuthority", + "address": null + }, + "schema": "Json" + }, + "data_offset": 122, + "data": { + "message": "Hello", + "target": "world" + } + } + ] +} diff --git 
a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority.snap new file mode 100644 index 000000000..c03cd39ba --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority.snap @@ -0,0 +1,89 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 714 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "2pY3t29uxpBotbmKbCsQNjYfML5DBoBshDgB7hpHu3XA", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3936000, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "data_len": 30, + "index": 0, + "offset": 119, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "type": 
"AppData", + "adapter_config": { + "data_authority": { + "type": "Address", + "address": "CYHj4vYrPnuQ1pyqrjeRBJJ4QA4LfyoFaaRgSow16Ziw" + }, + "schema": "MsgPack" + }, + "data_offset": 154, + "data": { + "message": "Hello", + "target": "msgpack" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_binary_data.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_binary_data.snap new file mode 100644 index 000000000..7f547acc7 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_binary_data.snap @@ -0,0 +1,94 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 777 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "BVjK8uvqUuH5YU6ThX6A7gznx2xi8BxshawbuFe1Y5Vr", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "41thppJ4z9HnBNbFMLnztXS7seqBptYV1jG8UhxR4vK8", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3824640, + "executable": false, 
+ "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "data_len": 13, + "index": 0, + "offset": 119, + "authority": { + "type": "None", + "address": null + }, + "type": "DataSection", + "adapter_config": { + "parent_key": { + "linked_app_data": { + "type": "Address", + "address": "CYHj4vYrPnuQ1pyqrjeRBJJ4QA4LfyoFaaRgSow16Ziw" + } + }, + "schema": "Binary" + }, + "data_offset": 155, + "data": "SGVsbG8sIHdvcmxkIQ==" + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_json_data.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_json_data.snap new file mode 100644 index 000000000..aef5e0e19 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_json_data.snap @@ -0,0 +1,97 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 840 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "9vqNxe6M6t7PYo1gXrY18hVgDvCpouHSZ6vdDEFbybeA", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "2aUn89GKuSjfYTeCH6GL1Y6CiUYqjvcgZehFGDJbhNeW", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + 
"frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3762000, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "data_len": 36, + "index": 0, + "offset": 119, + "authority": { + "type": "None", + "address": null + }, + "type": "DataSection", + "adapter_config": { + "parent_key": { + "linked_app_data": { + "type": "Owner", + "address": null + } + }, + "schema": "Json" + }, + "data_offset": 123, + "data": { + "message": "Hello", + "target": "world" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_msg_pack_data.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_msg_pack_data.snap new file mode 100644 index 000000000..6ca45caeb --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_msg_pack_data.snap @@ -0,0 +1,97 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 903 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "EuXEcqHhF9jPxV9CKB5hjHC2TRo3xprdgk5vJTc9qRaY", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": 
"53q1PCBy5KgzZfoHu6bnLWQFVmJtKyceP8DqNMhXWUaA", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3720240, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "data_len": 30, + "index": 0, + "offset": 119, + "authority": { + "type": "None", + "address": null + }, + "type": "DataSection", + "adapter_config": { + "parent_key": { + "linked_app_data": { + "type": "UpdateAuthority", + "address": null + } + }, + "schema": "MsgPack" + }, + "data_offset": 123, + "data": { + "message": "Hello", + "target": "msgpack" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_edition.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_edition.snap new file mode 100644 index 000000000..a52ef8e6a --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_edition.snap @@ -0,0 +1,78 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 238 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "AejY8LGKAbQsrGZS1qgN4uFu99dJD3f8Js9Yrt7K3tCc", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + 
"compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3386160, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": { + "edition": { + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "index": 0, + "offset": 119, + "data": { + "number": 1 + } + } + }, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_multiple_internal_and_external_plugins.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_multiple_internal_and_external_plugins.snap new file mode 100644 index 000000000..ecdd918be --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_multiple_internal_and_external_plugins.snap @@ -0,0 +1,156 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 496 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "Aw7KSaeRECbjLW7BYTUtMwGkaiAGhxrQxdLnpLYRnmbB", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + 
"eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0005, + "basis_points": 5, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": false, + "delegated": true, + "delegate": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 4882560, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": { + "royalties": { + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "index": 2, + "offset": 155, + "data": { + "creators": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "percentage": 100 + } + ], + "basis_points": 5, + "rule_set": "None" + } + }, + "freeze_delegate": { + "authority": { + "type": "Address", + "address": "JDnbeBUv9MaMGg28sGjUjfo7aUwBwHAFQHkvYZGA1aPh" + }, + "index": 4, + "offset": 231, + "data": { + "frozen": false + } + }, + "transfer_delegate": { + "authority": { + "type": "Owner", + "address": null + }, + "index": 0, + "offset": 119, + "data": {} + } + }, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "Oracle", + "index": 1, + "offset": 120, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "lifecycle_checks": { + "create": [ + "CanReject" + ], + "transfer": [ + "CanReject" + ] + }, + "adapter_config": { + "base_address_config": null, + "base_address": "4dbE64e2ALgNGMJLRBTei2qjzR5SKApKmvoLnA1aqPZB", + "results_offset": "Anchor" + } + }, + { + "type": "Oracle", + "index": 3, + "offset": 196, + "authority": { + "type": 
"UpdateAuthority", + "address": null + }, + "lifecycle_checks": { + "update": [ + "CanReject" + ], + "burn": [ + "CanReject" + ] + }, + "adapter_config": { + "base_address_config": null, + "base_address": "8v4xtK8C69wEGhfpGm7LN8dsWBwyEYLamHEYjcKfqdCg", + "results_offset": "Anchor" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config.snap new file mode 100644 index 000000000..17ea4da4b --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config.snap @@ -0,0 +1,93 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 393 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "9v2H5sDBXKmYkGHebfaWwdgBWuMTBVWQom3QeEcV8oJj", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "new name 2", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3755040, + "executable": false, 
+ "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "Oracle", + "index": 0, + "offset": 119, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "lifecycle_checks": { + "update": [ + "CanReject" + ] + }, + "adapter_config": { + "base_address_config": { + "preconfigured_asset": { + "is_signer": false, + "is_writable": false + } + }, + "base_address": "4RZ7RhXeL4oz4kVX5fpRfkNQ3nz1n4eruqBn2AGPQepo", + "results_offset": { + "custom": 48 + } + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_external_plugin_on_collection.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_external_plugin_on_collection.snap new file mode 100644 index 000000000..9d9a60b9a --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_external_plugin_on_collection.snap @@ -0,0 +1,88 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 331 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreCollection", + "id": "Hvdg2FjMEndC4jxF2MJgKCaj5omLLZ19LNfD4p9oXkpE", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/collection", + "files": [], + "metadata": { + "name": "Test Collection", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + 
"ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 2067120, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "num_minted": 0, + "current_size": 0, + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "Oracle", + "index": 0, + "offset": 103, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "lifecycle_checks": { + "burn": [ + "CanReject" + ] + }, + "adapter_config": { + "base_address_config": null, + "base_address": "4iV6bzDfTrpvfeovNwwKFkdLneb5GWSLv8nz7F3mdHfG", + "results_offset": "Anchor" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_multiple_lifecycle_events.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_multiple_lifecycle_events.snap new file mode 100644 index 000000000..2e8cb9b87 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_multiple_lifecycle_events.snap @@ -0,0 +1,89 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 362 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "3puHPHUHFXxhS7qPQa5YYTngzPbetoWbu7y2UxxB6xrF", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + 
"grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3713280, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "Oracle", + "index": 0, + "offset": 119, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "lifecycle_checks": { + "create": [ + "CanReject" + ], + "transfer": [ + "CanReject" + ] + }, + "adapter_config": { + "base_address_config": null, + "base_address": "4LDi2mR1JPsvdfwHhjmW4ZAZMbWE9rtBPUL6uQAgmWLz", + "results_offset": "Anchor" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_no_offset.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_no_offset.snap new file mode 100644 index 000000000..abad82c46 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_no_offset.snap @@ -0,0 +1,86 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 424 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "2TZpUiBiyMdwLFTKRshVMHK8anQK2W8XXbfUfyxR8yvc", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": 
false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3678480, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "Oracle", + "index": 0, + "offset": 119, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "lifecycle_checks": { + "burn": [ + "CanReject" + ] + }, + "adapter_config": { + "base_address_config": null, + "base_address": "J9JBrE7Jravcpc7Px1iWSXTdvA2Kgt5yB5kQoWSvoQFo", + "results_offset": "NoOffset" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_pubkey_in_rule_set.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_pubkey_in_rule_set.snap new file mode 100644 index 000000000..e14dc617a --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_pubkey_in_rule_set.snap @@ -0,0 +1,101 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 269 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "8H71x9Bhh9E9o3MZK4QnVC5MRFn1WZRf2Mc9w2wEbG5V", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://arweave.net/JIdaRM9RXQUNhigQ7ochqc3AAVl95uGHAsilhcSMUC4", + "files": [], + "metadata": { + "name": "TestCore", + "symbol": "" + }, + "links": {} + }, + 
"authorities": [ + { + "address": "", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "GNDf9uggWELLWkRQAA62kCbMcATQcPw2Acz7ZnK347wP", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "8LsUNkb7bLaAcZ7NjRKPuvcyRGTWbm9BxzUpVKjqdV8o", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "8LsUNkb7bLaAcZ7NjRKPuvcyRGTWbm9BxzUpVKjqdV8o" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 4137840, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": { + "royalties": { + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "index": 0, + "offset": 155, + "data": { + "creators": [ + { + "address": "8LsUNkb7bLaAcZ7NjRKPuvcyRGTWbm9BxzUpVKjqdV8o", + "percentage": 100 + } + ], + "basis_points": 500, + "rule_set": { + "program_deny_list": [ + "BGUMAp9Gq7iTEuizy4pqaxsTyUCBK68MDfK752saRPUY" + ] + } + } + } + }, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_two_oracle_external_plugins.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_two_oracle_external_plugins.snap new file mode 100644 index 000000000..74764d194 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_two_oracle_external_plugins.snap @@ -0,0 +1,105 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 
300 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "4aarnaiMVtGEp5nToRqBEUGtqY2F1gW2V8bBQe1rN5V9", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "new name 2", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 4075200, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "Oracle", + "index": 0, + "offset": 119, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "lifecycle_checks": { + "update": [ + "CanReject" + ] + }, + "adapter_config": { + "base_address_config": null, + "base_address": "655aWrKDz4hNFGaSzxmT9xuwxTKPrvb5SMDohCfVFCXL", + "results_offset": "Anchor" + } + }, + { + "type": "Oracle", + "index": 1, + "offset": 154, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "lifecycle_checks": { + "update": [ + "CanReject" + ] + }, + "adapter_config": { + "base_address_config": null, + "base_address": "6CGzRzs53RppPfSimeEjEYhKMagP9rT9zZrEVVgjSq7A", + "results_offset": "Anchor" + } + } + ] +} diff --git 
a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_authority.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_authority.snap new file mode 100644 index 000000000..5acbdeb5d --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_authority.snap @@ -0,0 +1,133 @@ +--- +source: integration_tests/tests/integration_tests/mpl_core_tests.rs +assertion_line: 108 +expression: response +--- +{ + "total": 2, + "limit": 50, + "page": 1, + "items": [ + { + "interface": "MplCoreAsset", + "id": "4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "GzYvuu9aUYXmnardj4svbAcCNmefiaGu2E3knGw9NJQQ" + }, + "supply": null, + "mutable": true, + "burnt": false, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] + }, + { + "interface": "MplCoreCollection", + "id": "9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": 
"https://example.com/collection", + "files": [], + "metadata": { + "name": "Test Collection", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "plugins": {}, + "mpl_core_info": { + "num_minted": 1, + "current_size": 1, + "plugins_json_version": 1 + }, + "external_plugins": [] + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group.snap new file mode 100644 index 000000000..ba325a9ce --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group.snap @@ -0,0 +1,211 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 167 +expression: response +snapshot_kind: text +--- +{ + "total": 3, + "limit": 50, + "page": 1, + "items": [ + { + "interface": "MplCoreAsset", + "id": "kTMCCKLTaZsnSReer12HsciwScUwhHyZyd9D9BwQF8k", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + 
"compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3156480, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] + }, + { + "interface": "MplCoreAsset", + "id": "EgzsppfYJmUet4ve8MnuHMyvSnj6R7LRmwsGEH5TuGhB", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, 
+ "mutable": true, + "burnt": false, + "lamports": 3156480, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] + }, + { + "interface": "MplCoreAsset", + "id": "J2kazVRuZ33Po4PVyZGxiDYUMQ1eZiT5Xa13usRYo264", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3156480, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds.snap new file mode 100644 index 000000000..3857cec5e --- /dev/null +++ 
b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds.snap @@ -0,0 +1,136 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 465 +expression: response +snapshot_kind: text +--- +{ + "total": 1, + "limit": 50, + "page": 1, + "items": [ + { + "interface": "MplCoreAsset", + "id": "CWJDcrzxSDE7FeNRzMK1aSia7qoaUPrrGQ81E7vkQpq4", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "Do7rVGmVNa9wjsKNyjoa5phqriLER6HCqUQm5zyoTX3f", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "FAe4nM85BQ8b1nWEc5TTeogQGnNDWsuGYU84vuiPc7kE" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 4151760, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "Oracle", + "index": 0, + "offset": 119, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "lifecycle_checks": { + "transfer": [ + "CanReject" + ] + }, + "adapter_config": { + "base_address_config": { + "custom_pda": { + "is_signer": false, + "seeds": [ + "Collection", + "Owner", + "Recipient", + "Asset", + { 
+ "address": "HmQ75Sraz5HoTzVGpMuepXVgxwfiKyN5Mar8NxDqL9dh" + }, + { + "bytes": [ + 101, + 120, + 97, + 109, + 112, + 108, + 101, + 45, + 115, + 101, + 101, + 100, + 45, + 98, + 121, + 116, + 101, + 115 + ] + } + ], + "custom_program_id": null, + "is_writable": false + } + }, + "base_address": "4RZ7RhXeL4oz4kVX5fpRfkNQ3nz1n4eruqBn2AGPQepo", + "results_offset": "Anchor" + } + } + ] + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_owner.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_owner.snap new file mode 100644 index 000000000..43366dda3 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_owner.snap @@ -0,0 +1,79 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 207 +expression: response +snapshot_kind: text +--- +{ + "total": 1, + "limit": 50, + "page": 1, + "items": [ + { + "interface": "MplCoreAsset", + "id": "9tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + 
"owner": "7uScVQiT4vArB88dHrZoeVKWbtsRJmNp9r5Gce5VQpXS" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3156480, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection.snap new file mode 100644 index 000000000..fefe0a68e --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection.snap @@ -0,0 +1,80 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 79 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreCollection", + "id": "DHciVfQxHHM7t2asQJRjjkKbjvZ4PuG3Y3uiULMQUjJQ", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/collection", + "files": [], + "metadata": { + "name": "Test Collection", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 1997520, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": { + "update_delegate": { + "authority": { + "type": 
"Address", + "address": "BpEw34ZXHm9TeqnPbcsHZJnx7StGa3GxBkZ3uTD9scfZ" + }, + "index": 0, + "offset": 103, + "data": { + "additional_delegates": [] + } + } + }, + "mpl_core_info": { + "num_minted": 1, + "current_size": 1, + "plugins_json_version": 1 + }, + "external_plugins": [] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority.snap new file mode 100644 index 000000000..5c21c2713 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority.snap @@ -0,0 +1,85 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 746 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreCollection", + "id": "41thppJ4z9HnBNbFMLnztXS7seqBptYV1jG8UhxR4vK8", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/collection", + "files": [], + "metadata": { + "name": "Test Collection", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": 
true, + "burnt": false, + "lamports": 2004480, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "num_minted": 1, + "current_size": 1, + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "LinkedAppData", + "index": 0, + "offset": 103, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "adapter_config": { + "data_authority": { + "type": "Address", + "address": "CYHj4vYrPnuQ1pyqrjeRBJJ4QA4LfyoFaaRgSow16Ziw" + }, + "schema": "Binary" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority.snap new file mode 100644 index 000000000..8a0fde793 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority.snap @@ -0,0 +1,85 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 809 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreCollection", + "id": "2aUn89GKuSjfYTeCH6GL1Y6CiUYqjvcgZehFGDJbhNeW", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/collection", + "files": [], + "metadata": { + "name": "Test Collection", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": 
false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 1781760, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "num_minted": 1, + "current_size": 1, + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "LinkedAppData", + "index": 0, + "offset": 103, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "adapter_config": { + "data_authority": { + "type": "Owner", + "address": null + }, + "schema": "Json" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority.snap new file mode 100644 index 000000000..10e483934 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority.snap @@ -0,0 +1,85 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 872 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreCollection", + "id": "53q1PCBy5KgzZfoHu6bnLWQFVmJtKyceP8DqNMhXWUaA", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/collection", + "files": [], + "metadata": { + "name": "Test Collection", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + 
"data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 1781760, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": {}, + "mpl_core_info": { + "num_minted": 1, + "current_size": 1, + "plugins_json_version": 1 + }, + "external_plugins": [ + { + "type": "LinkedAppData", + "index": 0, + "offset": 103, + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "adapter_config": { + "data_authority": { + "type": "UpdateAuthority", + "address": null + }, + "schema": "MsgPack" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin.snap new file mode 100644 index 000000000..7b414ff39 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin.snap @@ -0,0 +1,78 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 589 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "AGyjcG9mBfYJFMZiJVkXr4iX7re6vkQ1Fw5grukA6Hiu", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + 
"eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3386160, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": { + "verified_creators": { + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "index": 0, + "offset": 119, + "data": { + "signatures": [] + } + } + }, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] +} diff --git a/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin_with_signature.snap b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin_with_signature.snap new file mode 100644 index 000000000..1f7da18ac --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin_with_signature.snap @@ -0,0 +1,83 @@ +--- +source: integration_tests/src/mpl_core_tests.rs +assertion_line: 620 +expression: response +snapshot_kind: text +--- +{ + "interface": "MplCoreAsset", + "id": "4iVX1oZj6nLAMerjXFw3UeGD4QU7BEaCscsWqD3zEH37", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://example.com/asset", + "files": [], + "metadata": { + "name": "Test Asset", + "symbol": "" + }, + "links": {} + }, + "authorities": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + 
"data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "D9SSaw4tz5AGpfWynYJ66jDCVNTsbLBkqT8rxQFenqj4" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 3615840, + "executable": false, + "rent_epoch": 18446744073709551615, + "plugins": { + "verified_creators": { + "authority": { + "type": "UpdateAuthority", + "address": null + }, + "index": 0, + "offset": 119, + "data": { + "signatures": [ + { + "address": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "verified": true + } + ] + } + } + }, + "mpl_core_info": { + "plugins_json_version": 1 + }, + "external_plugins": [] +} diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap new file mode 100644 index 000000000..3bd76795e --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap @@ -0,0 +1,87 @@ +--- +source: integration_tests/src/regular_nft_tests.rs +assertion_line: 39 +expression: response +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "CMVuYDS9nTeujfTPJb8ik7CRhAqZv4DfjfdamFLkJgxE", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/9964.json", + "files": [], + "metadata": { + "name": "Mad Lads #9964", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + 
"eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "GBkD9WsmSPNBF7bnFxyC4BCC2ZiBzYq9iWStPZ9Kj9cU", + "ownership_model": "single", + "owner": "A59E2tNJEqNN9TDnzgGnmLmnTsdRDoPocGx3n1w2dqZw" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "4Gwjsd6QrkEuU7nBECFnCgikiSfVyi97bXjeKj9ytF9p", + "freeze_authority": "4Gwjsd6QrkEuU7nBECFnCgikiSfVyi97bXjeKj9ytF9p" + } +} diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap new file mode 100644 index 000000000..23e74e6a8 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap @@ -0,0 +1,171 @@ +--- +source: integration_tests/src/regular_nft_tests.rs +assertion_line: 92 +expression: response +snapshot_kind: text +--- +[ + { + "interface": "ProgrammableNFT", + "id": 
"2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/9959.json", + "files": [], + "metadata": { + "name": "Mad Lads #9959", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "CimD28VvDSHcGdErmLbbPimnkfwrKHvstNiQUutZDxWS", + "ownership_model": "single", + "owner": "9PacVenjPyQYiWBha89UYRM1nn6mf9bGY7vi32zY6DLn" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "6BiYUFJnBuHMuhNot6GvJi8gCNT3yqQqxxgV6rtnSK1c", + "freeze_authority": "6BiYUFJnBuHMuhNot6GvJi8gCNT3yqQqxxgV6rtnSK1c" + } + }, + null, + { + "interface": "ProgrammableNFT", + "id": "5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", 
+ "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/9963.json", + "files": [], + "metadata": { + "name": "Mad Lads #9963", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "CcYTgeWQhsdyGYbJAzUuuPG6jvNhfyzXmNWVSrxbdC9a", + "ownership_model": "single", + "owner": "3H3d3hfpZVVdVwuFAxDtDSFN2AdR7kwiDA3ynbnbkhc9" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "5WVxGy4xQ7Eujb1JCFMWCC4dg1LBo9RgzsfF3YLybgKR", + "freeze_authority": "5WVxGy4xQ7Eujb1JCFMWCC4dg1LBo9RgzsfF3YLybgKR" + } + } +] diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap new file mode 100644 index 000000000..df528f422 
--- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap @@ -0,0 +1,170 @@ +--- +source: integration_tests/src/regular_nft_tests.rs +assertion_line: 92 +expression: response +snapshot_kind: text +--- +[ + { + "interface": "ProgrammableNFT", + "id": "2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/9959.json", + "files": [], + "metadata": { + "name": "Mad Lads #9959", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "CimD28VvDSHcGdErmLbbPimnkfwrKHvstNiQUutZDxWS", + "ownership_model": "single", + "owner": "9PacVenjPyQYiWBha89UYRM1nn6mf9bGY7vi32zY6DLn" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": 
"TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "6BiYUFJnBuHMuhNot6GvJi8gCNT3yqQqxxgV6rtnSK1c", + "freeze_authority": "6BiYUFJnBuHMuhNot6GvJi8gCNT3yqQqxxgV6rtnSK1c" + } + }, + { + "interface": "ProgrammableNFT", + "id": "5rEeYv8R25b8j6YTHJvYuCKEzq44UCw1Wx1Wx2VPPLz1", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/9963.json", + "files": [], + "metadata": { + "name": "Mad Lads #9963", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "CcYTgeWQhsdyGYbJAzUuuPG6jvNhfyzXmNWVSrxbdC9a", + "ownership_model": "single", + "owner": "3H3d3hfpZVVdVwuFAxDtDSFN2AdR7kwiDA3ynbnbkhc9" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "5WVxGy4xQ7Eujb1JCFMWCC4dg1LBo9RgzsfF3YLybgKR", + 
"freeze_authority": "5WVxGy4xQ7Eujb1JCFMWCC4dg1LBo9RgzsfF3YLybgKR" + } + } +] diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap new file mode 100644 index 000000000..dd57b22a7 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap @@ -0,0 +1,170 @@ +--- +source: integration_tests/src/regular_nft_tests.rs +assertion_line: 92 +expression: response +snapshot_kind: text +--- +[ + { + "interface": "ProgrammableNFT", + "id": "HTKAVZZrDdyecCxzm3WEkCsG1GUmiqKm73PvngfuYRNK", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/9965.json", + "files": [], + "metadata": { + "name": "Mad Lads #9965", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "Hys4KoEw32y4sLHb96K2PqYT5jUHqUWdh6qS9EACEB4Y", + "ownership_model": "single", + "owner": 
"BaBQKh34KrqZzd4ifSHQYMf86HiBGASN6TWUi1ZwfyKv" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "HtNRkAnm459dDAxznkh2jEsHXkKRUfkojToMTnxiwWNk", + "freeze_authority": "HtNRkAnm459dDAxznkh2jEsHXkKRUfkojToMTnxiwWNk" + } + }, + { + "interface": "ProgrammableNFT", + "id": "2NqdYX6kJmMUoChnDXU2UrP9BsoPZivRw3uJG8iDhRRd", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/9959.json", + "files": [], + "metadata": { + "name": "Mad Lads #9959", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "CimD28VvDSHcGdErmLbbPimnkfwrKHvstNiQUutZDxWS", + "ownership_model": "single", + "owner": "9PacVenjPyQYiWBha89UYRM1nn6mf9bGY7vi32zY6DLn" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 
5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "6BiYUFJnBuHMuhNot6GvJi8gCNT3yqQqxxgV6rtnSK1c", + "freeze_authority": "6BiYUFJnBuHMuhNot6GvJi8gCNT3yqQqxxgV6rtnSK1c" + } + } +] diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap new file mode 100644 index 000000000..530503189 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap @@ -0,0 +1,99 @@ +--- +source: integration_tests/src/regular_nft_tests.rs +assertion_line: 129 +expression: response +snapshot_kind: text +--- +{ + "total": 1, + "limit": 1, + "page": 1, + "items": [ + { + "interface": "ProgrammableNFT", + "id": "7jFuJ73mBPDdLMvCYxzrpFTD9FeDudRxdXGDALP5Cp2W", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://nftstorage.link/ipfs/bafybeidbjja4gycfx2ywaicfpdv2czog6qp75zasluknexukidqgnzby3y/5517.json", + "files": [], + "metadata": { + "name": "SMB Gen3 #5518", + "symbol": "", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "GfELr1GA9bLmgiMymUm7h8nDkZLG2Ls6txSsANopeVEW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "8Rt3Ayqth4DAiPnW9MDFi63TiQJHmohfTWLMQFHi4KZH", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { 
+ "address": "HV4Nvm9zHfNA43JYYkjZu8vwqiuE8bfEhwcKFfyQ65o5", + "share": 0, + "verified": true + }, + { + "address": "smbBn7Votkw2upiVZtX9WgkmC7fNW2QabfVFuPLhu3C", + "share": 0, + "verified": false + }, + { + "address": "HqqiyJcm3yWPyzwisRKAQa2bJAj14V837yJRGaxwRFaG", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "9qUcfdADyrrTSetFjNjF9Ro7LKAqzJkzZV6WKLHfv5MU" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "8cYUXv8x9ekF2JUQiiheaf6v9NfNt1ZMWmwHG5gTXkgp", + "freeze_authority": "8cYUXv8x9ekF2JUQiiheaf6v9NfNt1ZMWmwHG5gTXkgp" + } + } + ] +} diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap new file mode 100644 index 000000000..bfebe7d82 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap @@ -0,0 +1,94 @@ +--- +source: integration_tests/src/regular_nft_tests.rs +assertion_line: 159 +expression: response +snapshot_kind: text +--- +{ + "total": 1, + "limit": 2, + "page": 1, + "items": [ + { + "interface": "ProgrammableNFT", + "id": "Dt3XDSAdXAJbHqvuycgCTHykKCC7tntMFGMmSvfBbpTL", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://arweave.net/3EM4n3RvYwji7FqwqZ5FX2ULDsLMV5R_AXXlNcI5ls4", + "files": [], + "metadata": { + "name": "Claynosaurz: Call of Saga #1887", + "symbol": "DINO", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "G2R7KKR9aycrMDzR2EK2Cs69f2NNBRK8AZUuaohQBg2r", + 
"scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "1yPMtWU5aqcF72RdyRD5yipmcMRC8NGNK59NvYubLkZ", + "verified": true + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "47besT5AjYkf8bdtzxt7k8rZbKr612Z3cayEx2wHCtLn", + "share": 0, + "verified": true + }, + { + "address": "36tfiBtaDGjAMKd6smPacHQhe4MXycLL6f9ww9CD1naT", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "6Cr66AabRYymhZgYQSfTCo6FVpH18wXrMZswAbcErpyX" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 0, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "AMgreyN1PLncz1gh4RFTpAyP7gscfKKPF3swky7Kpbqo", + "freeze_authority": "AMgreyN1PLncz1gh4RFTpAyP7gscfKKPF3swky7Kpbqo" + } + } + ] +} diff --git a/integrity_verification/src/params.rs b/integrity_verification/src/params.rs index ec8c5aad1..ce5ecd5fa 100644 --- a/integrity_verification/src/params.rs +++ b/integrity_verification/src/params.rs @@ -66,7 +66,7 @@ pub fn generate_get_assets_by_group_params( before, after, cursor: None, - options: None, + options: Default::default(), } } @@ -88,7 +88,7 @@ pub fn generate_get_assets_by_owner_params( before, after, cursor: None, - options: None, + options: Default::default(), } } @@ -112,7 +112,7 @@ pub fn generate_get_assets_by_creator_params( before, after, cursor: None, - options: None, + options: Default::default(), 
} } @@ -134,12 +134,15 @@ pub fn generate_get_assets_by_authority_params( before, after, cursor: None, - options: None, + options: Default::default(), } } pub fn generate_get_asset_params(id: String) -> GetAsset { - GetAsset { id, options: None } + GetAsset { + id, + options: Default::default(), + } } pub fn generate_get_asset_proof_params(id: String) -> GetAssetProof { diff --git a/interface/src/json.rs b/interface/src/json.rs index 1a33331cd..158db30d1 100644 --- a/interface/src/json.rs +++ b/interface/src/json.rs @@ -1,11 +1,19 @@ use async_trait::async_trait; use mockall::automock; +#[derive(Debug, Clone)] +pub enum JsonDownloadResult { + JsonContent(String), + MediaUrlAndMimeType { url: String, mime_type: String }, +} + #[automock] #[async_trait] pub trait JsonDownloader { - async fn download_file(&self, url: String) - -> Result; + async fn download_file( + &self, + url: String, + ) -> Result; } #[automock] @@ -13,6 +21,9 @@ pub trait JsonDownloader { pub trait JsonPersister { async fn persist_response( &self, - results: Vec<(String, Result)>, + results: Vec<( + String, + Result, + )>, ) -> Result<(), crate::error::JsonDownloaderError>; } diff --git a/interface/src/price_fetcher.rs b/interface/src/price_fetcher.rs index f67e69426..0ad6ea874 100644 --- a/interface/src/price_fetcher.rs +++ b/interface/src/price_fetcher.rs @@ -12,10 +12,10 @@ pub trait PriceFetcher { pub trait TokenPriceFetcher { async fn fetch_token_symbols( &self, - token_ids: &[Pubkey], + token_ids: &[String], ) -> Result, UsecaseError>; async fn fetch_token_prices( &self, - token_ids: &[Pubkey], + token_ids: &[String], ) -> Result, UsecaseError>; } diff --git a/metrics_utils/src/lib.rs b/metrics_utils/src/lib.rs index e166b879e..fcba2f26c 100644 --- a/metrics_utils/src/lib.rs +++ b/metrics_utils/src/lib.rs @@ -163,6 +163,7 @@ pub struct BackfillerMetricsConfig { slots_collected: Family, data_processed: Family, // slots & transactions last_processed_slot: Family, + slot_delay: Family, } impl 
Default for BackfillerMetricsConfig { @@ -177,9 +178,20 @@ impl BackfillerMetricsConfig { slots_collected: Family::::default(), data_processed: Family::::default(), last_processed_slot: Family::::default(), + slot_delay: Family::::new_with_constructor(|| { + Histogram::new(exponential_buckets(400.0, 1.8, 10)) + }), } } + pub fn set_slot_delay_time(&self, label: &str, duration: f64) { + self.slot_delay + .get_or_create(&MetricLabel { + name: label.to_string(), + }) + .observe(duration); + } + pub fn inc_slots_collected(&self, label: &str, status: MetricStatus) -> u64 { self.slots_collected .get_or_create(&MetricLabelWithStatus { @@ -223,6 +235,12 @@ impl BackfillerMetricsConfig { "The last processed slot by backfiller", self.last_processed_slot.clone(), ); + + registry.register( + format!("{}slot_delay", prefix), + "The delay between the slot time and the time when it was processed by the backfiller", + self.slot_delay.clone(), + ); } pub fn register(&self, registry: &mut Registry) { diff --git a/metrics_utils/src/utils.rs b/metrics_utils/src/utils.rs index 705a5afe4..2cd039ba2 100644 --- a/metrics_utils/src/utils.rs +++ b/metrics_utils/src/utils.rs @@ -82,10 +82,10 @@ pub async fn start_metrics(register: Registry, port: Option) { tokio::spawn(async move { match setup_metrics(register, port).await { Ok(_) => { - info!("Setup metrics successfully") + info!("Metrics server stopped successfully"); } Err(e) => { - error!("Setup metrics failed: {:?}", e) + error!("Metrics server stopped with an error: {:?}", e) } } }); diff --git a/migrations/1_init.sql b/migrations/1_init.sql index a5119bb8f..5e9d93e53 100644 --- a/migrations/1_init.sql +++ b/migrations/1_init.sql @@ -123,4 +123,4 @@ CREATE TABLE last_synced_key ( ); -- Insert an initial row (assuming there's no last_synced_key initially) -INSERT INTO last_synced_key (last_synced_asset_update_key) VALUES (null); +INSERT INTO last_synced_key (last_synced_asset_update_key) VALUES (null); \ No newline at end of file diff 
--git a/nft_ingester/Cargo.toml b/nft_ingester/Cargo.toml index 5ea398f8c..693e43c67 100644 --- a/nft_ingester/Cargo.toml +++ b/nft_ingester/Cargo.toml @@ -80,6 +80,10 @@ coingecko = { workspace = true } libreplex_inscriptions = { workspace = true } spl-token-2022 = { workspace = true } moka = { workspace = true } +indicatif = { workspace = true } +tokio-retry = { workspace = true } +axum = { workspace = true } +rocksdb = { workspace = true } [dev-dependencies] setup = { path = "../tests/setup" } @@ -121,11 +125,23 @@ name = "ingester" [[bin]] name = "migrator" -[[bin]] -name = "raw_backfiller" - [[bin]] name = "synchronizer" [[bin]] name = "raw_backup" + +[[bin]] +name = "backfill" + +[[bin]] +name = "slot_persister" + +[[bin]] +name = "slot_checker" + +[[bin]] +name = "explorer" + +[[bin]] +name = "synchronizer_utils" diff --git a/nft_ingester/src/api/api_impl.rs b/nft_ingester/src/api/api_impl.rs index 369c7885b..a70a6f098 100644 --- a/nft_ingester/src/api/api_impl.rs +++ b/nft_ingester/src/api/api_impl.rs @@ -59,6 +59,7 @@ where /// E.g. 
https://storage-service.xyz/ storage_service_base_path: Option, token_price_fetcher: Arc, + native_mint_pubkey: String, } pub fn not_found() -> DasApiError { @@ -88,6 +89,7 @@ where account_balance_getter: Arc, storage_service_base_path: Option, token_price_fetcher: Arc, + native_mint_pubkey: String, ) -> Self { DasApi { pg_client, @@ -102,6 +104,7 @@ where account_balance_getter, storage_service_base_path, token_price_fetcher, + native_mint_pubkey, } } @@ -308,7 +311,7 @@ where let latency_timer = Instant::now(); let id = validate_pubkey(payload.id.clone())?; - let options = payload.options.unwrap_or_default(); + let options = payload.options; let res = get_asset( self.rocks_db.clone(), @@ -356,7 +359,7 @@ where .into_iter() .map(validate_pubkey) .collect::, _>>()?; - let options = payload.options.unwrap_or_default(); + let options = payload.options; let res = get_asset_batch( self.rocks_db.clone(), @@ -702,7 +705,7 @@ where { let pagination = payload.get_all_pagination_parameters(); let sort_by = payload.get_sort_parameter().unwrap_or_default(); - let options = payload.get_options().unwrap_or_default(); + let options = payload.get_options(); let query: SearchAssetsQuery = payload .try_into() @@ -731,6 +734,7 @@ where self.token_price_fetcher.clone(), self.metrics.clone(), &self.tree_gaps_checker, + &self.native_mint_pubkey, ) .await?; diff --git a/nft_ingester/src/api/backfilling_state_consistency.rs b/nft_ingester/src/api/backfilling_state_consistency.rs index 1262118c4..1d7443278 100644 --- a/nft_ingester/src/api/backfilling_state_consistency.rs +++ b/nft_ingester/src/api/backfilling_state_consistency.rs @@ -39,11 +39,12 @@ impl BackfillingStateConsistencyChecker { }; tasks.lock().await.spawn(async move { while rx.is_empty() { - overwhelm_backfill_gap.store( - rocks_db.bubblegum_slots.iter_start().count().saturating_add(rocks_db.ingestable_slots.iter_start().count()) - >= consistence_backfilling_slots_threshold as usize, - Ordering::Relaxed, - ); + // TODO: 
refactor this to use parameter from storage and last slot from slot storage + // overwhelm_backfill_gap.store( + // rocks_db.bubblegum_slots.iter_start().count().saturating_add(rocks_db.ingestable_slots.iter_start().count()) + // >= consistence_backfilling_slots_threshold as usize, + // Ordering::Relaxed, + // ); tokio::select! { _ = tokio::time::sleep(Duration::from_secs(CATCH_UP_SEQUENCES_TIMEOUT_SEC)) => {}, _ = rx.recv() => { diff --git a/nft_ingester/src/api/dapi/asset.rs b/nft_ingester/src/api/dapi/asset.rs index b826cc966..8888574ce 100644 --- a/nft_ingester/src/api/dapi/asset.rs +++ b/nft_ingester/src/api/dapi/asset.rs @@ -3,9 +3,10 @@ use std::string::ToString; use std::sync::Arc; use entities::api_req_params::{AssetSortDirection, Options}; +use entities::enums::SpecificationAssetClass; use entities::models::{AssetSignatureWithPagination, OffChainData}; use interface::asset_sigratures::AssetSignaturesGetter; -use interface::json::{JsonDownloader, JsonPersister}; +use interface::json::{JsonDownloadResult, JsonDownloader, JsonPersister}; use rocks_db::errors::StorageError; use solana_sdk::pubkey::Pubkey; use tracing::error; @@ -14,6 +15,7 @@ use crate::api::dapi::rpc_asset_models::FullAsset; use futures::{stream, StreamExt}; use interface::price_fetcher::TokenPriceFetcher; use interface::processing_possibility::ProcessingPossibilityChecker; +use itertools::Itertools; use metrics_utils::ApiMetricsConfig; use rocks_db::asset::{AssetLeaf, AssetSelectedMaps}; use rocks_db::{AssetAuthority, Storage}; @@ -29,26 +31,21 @@ fn convert_rocks_asset_model( token_symbols: &HashMap, offchain_data: OffChainData, ) -> Result { - let static_data = - asset_selected_maps - .assets_static - .get(asset_pubkey) - .ok_or(StorageError::Common( - "No relevant assets_static_data".to_string(), - ))?; - let dynamic_data = - asset_selected_maps - .assets_dynamic - .get(asset_pubkey) - .ok_or(StorageError::Common( - "No relevant asset_dynamic_data".to_string(), - ))?; - let owner = 
asset_selected_maps - .assets_owner + let data = asset_selected_maps + .asset_complete_details .get(asset_pubkey) .ok_or(StorageError::Common( - "No relevant assets_owners".to_string(), + "No relevant asset_complete_details".to_string(), ))?; + let static_data = data.static_details.as_ref().ok_or(StorageError::Common( + "No relevant assets_static_data".to_string(), + ))?; + let dynamic_data = data.dynamic_details.as_ref().ok_or(StorageError::Common( + "No relevant asset_dynamic_data".to_string(), + ))?; + let owner = data.owner.as_ref().ok_or(StorageError::Common( + "No relevant assets_owners".to_string(), + ))?; let leaf = asset_selected_maps .assets_leaf @@ -56,15 +53,18 @@ fn convert_rocks_asset_model( .cloned() .unwrap_or(AssetLeaf::default()); // Asset may not have a leaf but we still can make the conversion - let collection_dynamic_data = asset_selected_maps - .assets_collection - .get(asset_pubkey) + let collection_data = data + .collection + .as_ref() .and_then(|collection| { asset_selected_maps - .assets_dynamic + .asset_complete_details .get(&collection.collection.value) }) .cloned(); + let collection_dynamic_data = collection_data + .as_ref() + .and_then(|c| c.dynamic_details.clone()); let inscription = asset_selected_maps.inscriptions.get(asset_pubkey).cloned(); Ok(FullAsset { @@ -73,29 +73,17 @@ fn convert_rocks_asset_model( asset_dynamic: dynamic_data.clone(), asset_leaf: leaf, offchain_data, - asset_collections: asset_selected_maps - .assets_collection - .get(asset_pubkey) - .cloned(), - assets_authority: asset_selected_maps - .assets_authority - .get(asset_pubkey) - .cloned() - .unwrap_or(AssetAuthority::default()), - edition_data: asset_selected_maps - .assets_static - .get(asset_pubkey) - .and_then(|static_details| { - static_details - .edition_address - .and_then(|e| asset_selected_maps.editions.get(&e).cloned()) - }), - mpl_core_collections: asset_selected_maps - .assets_collection - .get(asset_pubkey) + asset_collections: 
data.collection.clone(), + assets_authority: data.authority.clone(), + edition_data: static_data + .edition_address + .and_then(|e| asset_selected_maps.editions.get(&e).cloned()), + mpl_core_collections: data + .collection + .as_ref() .and_then(|collection| { asset_selected_maps - .assets_collection + .mpl_core_collections .get(&collection.collection.value) }) .cloned(), @@ -148,19 +136,24 @@ fn asset_selected_maps_into_full_asset( options: &Options, ) -> Option { if !options.show_unverified_collections { - if let Some(collection_data) = asset_selected_maps.assets_collection.get(id) { - if !collection_data.is_collection_verified.value { - return None; + if let Some(asset_complete_details) = asset_selected_maps.asset_complete_details.get(id) { + if let Some(asset_static_details) = &asset_complete_details.static_details { + // collection itself cannot have a collection + // TODO!: should we also include in this check FungibleToken? + if &asset_static_details.specification_asset_class != &SpecificationAssetClass::MplCoreCollection { + if let Some(collection_details) = &asset_complete_details.collection { + if !collection_details.is_collection_verified.value { + return None; + } + } + } } - } else { - // don't have collection data == collection unverified - return None; } } let offchain_data = asset_selected_maps .urls - .get(&id.to_string()) + .get(id) .and_then(|url| asset_selected_maps.offchain_data.get(url).cloned()) .unwrap_or_default(); @@ -217,14 +210,23 @@ pub async fn get_by_ids< } let unique_asset_ids: Vec<_> = unique_asset_ids_map.keys().cloned().collect(); - let token_prices_fut = token_price_fetcher.fetch_token_prices(asset_ids.as_slice()); - let token_symbols_fut = token_price_fetcher.fetch_token_symbols(asset_ids.as_slice()); + // request prices and symbols only for fungibles when the option is set. 
This will prolong the request at least an order of magnitude let asset_selected_maps_fut = rocks_db.get_asset_selected_maps_async(unique_asset_ids.clone(), owner_address, &options); - - let (token_prices, token_symbols, asset_selected_maps) = - tokio::join!(token_prices_fut, token_symbols_fut, asset_selected_maps_fut); - let mut asset_selected_maps = asset_selected_maps?; + let asset_ids_string = asset_ids + .clone() + .into_iter() + .map(|id| id.to_string()) + .collect_vec(); + let (token_prices, token_symbols) = if options.show_fungible { + let token_prices_fut = token_price_fetcher.fetch_token_prices(asset_ids_string.as_slice()); + let token_symbols_fut = + token_price_fetcher.fetch_token_symbols(asset_ids_string.as_slice()); + tokio::join!(token_prices_fut, token_symbols_fut) + } else { + (Ok(HashMap::new()), Ok(HashMap::new())) + }; + let mut asset_selected_maps = asset_selected_maps_fut.await?; let token_prices = token_prices.unwrap_or_else(|e| { error!("Fetch token prices: {}", e); metrics.inc_token_info_fetch_errors("prices"); @@ -240,12 +242,16 @@ pub async fn get_by_ids< let mut urls_to_download = Vec::new(); for (_, url) in asset_selected_maps.urls.iter() { - if urls_to_download.len() >= max_json_to_download { - break; + if url.is_empty() { + continue; } - if !asset_selected_maps.offchain_data.contains_key(url) && !url.is_empty() { + let offchain_data = asset_selected_maps.offchain_data.get(url); + if offchain_data.is_none() || offchain_data.unwrap().metadata.is_empty() { urls_to_download.push(url.clone()); } + if urls_to_download.len() >= max_json_to_download { + break; + } } let num_of_tasks = urls_to_download.len(); @@ -265,14 +271,30 @@ pub async fn get_by_ids< .await; for (json_url, res) in download_results.iter() { - if let Ok(metadata) = res { - asset_selected_maps.offchain_data.insert( - json_url.clone(), - OffChainData { - url: json_url.clone(), - metadata: metadata.clone(), - }, - ); + match res { + Ok(JsonDownloadResult::JsonContent(metadata)) 
=> { + asset_selected_maps.offchain_data.insert( + json_url.clone(), + OffChainData { + url: json_url.clone(), + metadata: metadata.clone(), + }, + ); + } + Ok(JsonDownloadResult::MediaUrlAndMimeType { url, mime_type }) => { + asset_selected_maps.offchain_data.insert( + json_url.clone(), + OffChainData { + url: json_url.clone(), + metadata: format!( + "{{\"image\":\"{}\",\"type\":\"{}\"}}", + url, mime_type + ) + .to_string(), + }, + ); + } + Err(e) => {} } } diff --git a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs index a6f31504f..e970c1094 100644 --- a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs +++ b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs @@ -269,17 +269,20 @@ fn extract_collection_metadata( } pub fn to_authority( - authority: &AssetAuthority, + authority: &Option, mpl_core_collection: &Option, ) -> Vec { let update_authority = mpl_core_collection .clone() .and_then(|update_authority| update_authority.authority.value); + // even if there is no authority for asset we should not set Pubkey::default(), just empty string + let auth_key = update_authority + .map(|update_authority| update_authority.to_string()) + .unwrap_or(authority.as_ref().map(|auth| auth.authority.to_string()).unwrap_or("".to_string())); + vec![Authority { - address: update_authority - .map(|update_authority| update_authority.to_string()) - .unwrap_or(authority.authority.to_string()), + address: auth_key, scopes: vec![Scope::Full], }] } @@ -364,7 +367,8 @@ pub fn asset_to_rpc( &full_asset .asset_dynamic .onchain_data - .map(|onchain_data| onchain_data.value) + .as_ref() + .map(|onchain_data| onchain_data.value.clone()) .unwrap_or_default(), ) .unwrap_or(serde_json::Value::Null); @@ -378,29 +382,36 @@ pub fn asset_to_rpc( let mpl_core_info = match interface { Interface::MplCoreAsset | Interface::MplCoreCollection => Some(MplCoreInfo { - num_minted: full_asset.asset_dynamic.num_minted.map(|u| u.value), - current_size: 
full_asset.asset_dynamic.current_size.map(|u| u.value), + num_minted: full_asset.asset_dynamic.num_minted.as_ref().map(|u| u.value), + current_size: full_asset.asset_dynamic.current_size.as_ref().map(|u| u.value), plugins_json_version: full_asset .asset_dynamic .plugins_json_version + .as_ref() .map(|u| u.value), }), _ => None, }; let supply = match interface { - Interface::V1NFT => full_asset.edition_data.map(|e| Supply { - edition_nonce, - print_current_supply: e.supply, - print_max_supply: e.max_supply, - edition_number: e.edition_number, - }), + Interface::V1NFT => { + if let Some(edition_info) = &full_asset.edition_data { + Some(Supply { + edition_nonce, + print_current_supply: edition_info.supply, + print_max_supply: edition_info.max_supply, + edition_number: edition_info.edition_number, + }) + } else { + Some(Supply{ + edition_nonce, + print_current_supply: 0, + print_max_supply: Some(0), + edition_number: None, + }) + } + }, _ => None, }; - let tree = if full_asset.asset_leaf.tree_id == Pubkey::default() { - None - } else { - Some(full_asset.asset_leaf.tree_id.to_bytes().to_vec()) - }; Ok(Some(RpcAsset { interface, @@ -414,38 +425,7 @@ pub fn asset_to_rpc( .map(|m| m.value.into()) .unwrap_or(ChainMutability::Unknown) .into(), - compression: Some(Compression { - eligible: full_asset.asset_dynamic.is_compressible.value, - compressed: full_asset.asset_dynamic.is_compressed.value, - leaf_id: full_asset.asset_leaf.nonce.unwrap_or(0) as i64, - seq: std::cmp::max( - full_asset - .asset_dynamic - .seq - .clone() - .and_then(|u| u.value.try_into().ok()) - .unwrap_or(0) as i64, - full_asset.asset_leaf.leaf_seq.unwrap_or(0) as i64, - ), - tree: tree - .map(|s| bs58::encode(s).into_string()) - .unwrap_or_default(), - asset_hash: full_asset - .asset_leaf - .leaf - .map(|s| bs58::encode(s).into_string()) - .unwrap_or_default(), - data_hash: full_asset - .asset_leaf - .data_hash - .map(|e| e.to_string()) - .unwrap_or_default(), - creator_hash: full_asset - .asset_leaf - 
.creator_hash - .map(|e| e.to_string()) - .unwrap_or_default(), - }), + compression: Some(get_compression_info(&full_asset)), grouping, royalty: Some(Royalty { royalty_model: full_asset.asset_static.royalty_target_type.into(), @@ -544,6 +524,54 @@ pub fn asset_to_rpc( })) } +pub fn get_compression_info(full_asset: &FullAsset) -> Compression { + let tree = if full_asset.asset_leaf.tree_id == Pubkey::default() { + None + } else { + Some(full_asset.asset_leaf.tree_id.to_bytes().to_vec()) + }; + + if let Some(was_decompressed) = &full_asset.asset_dynamic.was_decompressed { + if was_decompressed.value { + return Compression::default(); + } + } + + Compression { + eligible: full_asset.asset_dynamic.is_compressible.value, + compressed: full_asset.asset_dynamic.is_compressed.value, + leaf_id: full_asset.asset_leaf.nonce.unwrap_or(0), + seq: std::cmp::max( + full_asset + .asset_dynamic + .seq + .clone() + .and_then(|u| u.value.try_into().ok()) + .unwrap_or(0), + full_asset.asset_leaf.leaf_seq.unwrap_or(0), + ), + tree: tree + .map(|s| bs58::encode(s).into_string()) + .unwrap_or_default(), + asset_hash: full_asset + .asset_leaf + .leaf + .as_ref() + .map(|s| bs58::encode(s).into_string()) + .unwrap_or_default(), + data_hash: full_asset + .asset_leaf + .data_hash + .map(|e| e.to_string()) + .unwrap_or_default(), + creator_hash: full_asset + .asset_leaf + .creator_hash + .map(|e| e.to_string()) + .unwrap_or_default(), + } +} + pub fn build_transaction_signatures_response( signatures: AssetSignatureWithPagination, limit: u64, diff --git a/nft_ingester/src/api/dapi/rpc_asset_models.rs b/nft_ingester/src/api/dapi/rpc_asset_models.rs index cad025511..efb6fef47 100644 --- a/nft_ingester/src/api/dapi/rpc_asset_models.rs +++ b/nft_ingester/src/api/dapi/rpc_asset_models.rs @@ -131,7 +131,7 @@ pub struct Authority { pub scopes: Vec, } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema, Default)] 
pub struct Compression { pub eligible: bool, pub compressed: bool, @@ -139,8 +139,8 @@ pub struct Compression { pub creator_hash: String, pub asset_hash: String, pub tree: String, - pub seq: i64, - pub leaf_id: i64, + pub seq: u64, + pub leaf_id: u64, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] @@ -294,7 +294,7 @@ pub struct FullAsset { pub asset_leaf: AssetLeaf, pub offchain_data: OffChainData, pub asset_collections: Option, - pub assets_authority: AssetAuthority, + pub assets_authority: Option, pub edition_data: Option, pub mpl_core_collections: Option, pub collection_dynamic_data: Option, diff --git a/nft_ingester/src/api/dapi/search_assets.rs b/nft_ingester/src/api/dapi/search_assets.rs index bcd9b5ef2..96d6918db 100644 --- a/nft_ingester/src/api/dapi/search_assets.rs +++ b/nft_ingester/src/api/dapi/search_assets.rs @@ -45,6 +45,7 @@ pub async fn search_assets< token_price_fetcher: Arc, metrics: Arc, tree_gaps_checker: &Option>, + native_mint_pubkey: &str, ) -> Result { if options.show_fungible { filter.token_type = Some(TokenType::All) @@ -75,6 +76,7 @@ pub async fn search_assets< filter.owner_address, account_balance_getter, token_price_fetcher.clone(), + native_mint_pubkey ) ); let native_balance = native_balance.unwrap_or_else(|e| { @@ -231,6 +233,7 @@ async fn fetch_native_balance( owner_address: Option>, account_balance_getter: Arc, token_price_fetcher: Arc, + native_mint_pubkey: &str, ) -> Result, StorageError> { if !show_native_balance { return Ok(None); @@ -246,7 +249,7 @@ async fn fetch_native_balance( .await .map_err(|e| StorageError::Common(format!("Account balance getter: {}", e)))?; let token_price = *token_price_fetcher - .fetch_token_prices(&[spl_token::native_mint::id()]) + .fetch_token_prices(&[native_mint_pubkey.to_string()]) .await .map_err(|e| StorageError::Common(e.to_string()))? 
.get(&spl_token::native_mint::id().to_string()) diff --git a/nft_ingester/src/api/service.rs b/nft_ingester/src/api/service.rs index 5f6c76f38..19234beec 100644 --- a/nft_ingester/src/api/service.rs +++ b/nft_ingester/src/api/service.rs @@ -1,6 +1,7 @@ use hyper::{header::CONTENT_TYPE, Body, Method, Request, Response, Server, StatusCode}; use jsonrpc_http_server::hyper; use jsonrpc_http_server::hyper::service::{make_service_fn, service_fn}; +use metrics_utils::red::RequestErrorDurationMetrics; use multer::Multipart; use postgre_client::PgClient; use std::sync::Arc; @@ -53,6 +54,7 @@ pub async fn start_api( rocks_db: Arc, rx: Receiver<()>, metrics: Arc, + red_metrics: Option>, port: u16, proof_checker: Option>, tree_gaps_checker: Option>, @@ -68,6 +70,7 @@ pub async fn start_api( file_storage_path: &str, account_balance_getter: Arc, storage_service_base_url: Option, + native_mint_pubkey: String, ) -> Result<(), DasApiError> { let response_middleware = RpcResponseMiddleware {}; let request_middleware = RpcRequestMiddleware::new(archives_dir); @@ -118,7 +121,12 @@ pub async fn start_api( json_middleware_config.unwrap_or_default(), account_balance_getter, storage_service_base_url, - Arc::new(RaydiumTokenPriceFetcher::default()), + Arc::new(RaydiumTokenPriceFetcher::new( + "https://api-v3.raydium.io".to_string(), + crate::raydium_price_fetcher::CACHE_TTL, + red_metrics, + )), + native_mint_pubkey, ); run_api( diff --git a/nft_ingester/src/api/util.rs b/nft_ingester/src/api/util.rs index 36f5fd4b7..9e0faf6e8 100644 --- a/nft_ingester/src/api/util.rs +++ b/nft_ingester/src/api/util.rs @@ -6,7 +6,7 @@ use entities::api_req_params::{ pub trait ApiRequest { fn get_all_pagination_parameters(&self) -> Pagination; fn get_sort_parameter(&self) -> Option; - fn get_options(&self) -> Option; + fn get_options(&self) -> GetByMethodsOptions; } macro_rules! impl_request_with_pagination { @@ -26,8 +26,8 @@ macro_rules! 
impl_request_with_pagination { self.sort_by.clone() } - fn get_options(&self) -> Option { - self.options.clone().map(Into::into) + fn get_options(&self) -> GetByMethodsOptions { + self.options.clone().into() } } }; diff --git a/nft_ingester/src/backfiller.rs b/nft_ingester/src/backfiller.rs index eee3886d6..7d88258c3 100644 --- a/nft_ingester/src/backfiller.rs +++ b/nft_ingester/src/backfiller.rs @@ -4,31 +4,30 @@ use async_trait::async_trait; use backfill_rpc::rpc::BackfillRPC; use entities::models::{BufferedTransaction, RawBlock}; use flatbuffers::FlatBufferBuilder; -use futures::future::join_all; use interface::error::{BlockConsumeError, StorageError, UsecaseError}; use interface::signature_persistence::{BlockConsumer, BlockProducer}; -use interface::slot_getter::FinalizedSlotGetter; use interface::slots_dumper::{SlotGetter, SlotsDumper}; use metrics_utils::BackfillerMetricsConfig; use plerkle_serialization::serializer::seralize_encoded_transaction_with_status; -use rocks_db::bubblegum_slots::{BubblegumSlotGetter, ForceReingestableSlots}; +use rocks_db::bubblegum_slots::ForceReingestableSlots; use rocks_db::column::TypedColumn; use rocks_db::transaction::{TransactionProcessor, TransactionResultPersister}; -use rocks_db::Storage; +use rocks_db::{SlotStorage, Storage}; use solana_program::pubkey::Pubkey; use solana_transaction_status::{ EncodedConfirmedTransactionWithStatusMeta, EncodedTransactionWithStatusMeta, UiConfirmedBlock, }; use std::collections::HashMap; -use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; +use std::time; use tokio::sync::broadcast::Receiver; -use tokio::sync::Mutex; -use tokio::task::{JoinError, JoinSet}; +use tokio::task::JoinError; use tokio::time::Duration; +use tokio_util::sync::CancellationToken; use tracing::{error, info, warn}; use usecase::bigtable::{is_bubblegum_transaction_encoded, BigTableClient}; -use usecase::slots_collector::{SlotsCollector, SlotsGetter}; +use usecase::slots_collector::SlotsGetter; + pub const 
GET_SIGNATURES_LIMIT: usize = 2000; pub const GET_SLOT_RETRIES: u32 = 3; pub const SECONDS_TO_WAIT_NEW_SLOTS: u64 = 10; @@ -82,7 +81,7 @@ impl BackfillSource { #[async_trait] impl SlotsGetter for BackfillSource { - async fn get_slots( + async fn get_slots_sorted_desc( &self, collected_key: &Pubkey, start_at: u64, @@ -92,10 +91,13 @@ impl SlotsGetter for BackfillSource { BackfillSource::Bigtable(bigtable) => { bigtable .big_table_inner_client - .get_slots(collected_key, start_at, rows_limit) + .get_slots_sorted_desc(collected_key, start_at, rows_limit) + .await + } + BackfillSource::Rpc(rpc) => { + rpc.get_slots_sorted_desc(collected_key, start_at, rows_limit) .await } - BackfillSource::Rpc(rpc) => rpc.get_slots(collected_key, start_at, rows_limit).await, } } } @@ -114,252 +116,6 @@ impl BlockProducer for BackfillSource { } } -pub async fn run_perpetual_slot_collection( - backfiller_clone: Arc>, - rpc_backfiller_clone: Arc, - metrics: Arc, - backfiller_wait_period_sec: u64, - rx: Receiver<()>, -) -> Result<(), JoinError> { - info!("Running slot fetcher..."); - - if let Err(e) = backfiller_clone - .run_perpetual_slot_collection( - metrics, - Duration::from_secs(backfiller_wait_period_sec), - rpc_backfiller_clone, - rx, - ) - .await - { - error!("Error while running perpetual slot fetcher: {}", e); - } - - info!("Slot fetcher finished working"); - - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -pub async fn run_perpetual_slot_processing( - backfiller_clone: Arc>, - metrics: Arc, - slot_getter: Arc, - consumer: Arc, - producer: Arc, - backfiller_wait_period_sec: u64, - rx: Receiver<()>, - backup: Option>, -) -> Result<(), JoinError> -where - BC: BlockConsumer, - SG: SlotGetter, - BP: BlockProducer, -{ - info!("Running slot persister..."); - if let Err(e) = backfiller_clone - .run_perpetual_slot_processing( - metrics, - slot_getter, - consumer, - producer, - Duration::from_secs(backfiller_wait_period_sec), - rx, - backup, - ) - .await - { - error!("Error while 
running perpetual slot persister: {}", e); - } - info!("Slot persister finished working"); - - Ok(()) -} - -pub struct Backfiller { - rocks_client: Arc, - slots_getter: Arc, - slot_start_from: u64, - slot_parse_until: u64, - workers_count: usize, - chunk_size: usize, -} - -impl Backfiller { - pub fn new( - rocks_client: Arc, - slots_getter: Arc, - config: BackfillerConfig, - ) -> Backfiller { - Backfiller { - rocks_client, - slots_getter, - slot_start_from: config.slot_start_from, - slot_parse_until: config.get_slot_until(), - workers_count: config.workers_count, - chunk_size: config.chunk_size, - } - } - - pub async fn run_perpetual_slot_collection( - &self, - metrics: Arc, - wait_period: Duration, - finalized_slot_getter: Arc, - mut rx: Receiver<()>, - ) -> Result<(), IngesterError> { - info!("Starting perpetual slot parser"); - - let slots_collector = SlotsCollector::new( - self.rocks_client.clone(), - self.slots_getter.clone(), - metrics.clone(), - ); - - let top_collected_slot = self - .rocks_client - .get_parameter::(rocks_db::parameters::Parameter::LastFetchedSlot) - .await?; - let mut parse_until = self.slot_parse_until; - if let Some(slot) = top_collected_slot { - parse_until = slot; - } - loop { - match finalized_slot_getter.get_finalized_slot().await { - Ok(finalized_slot) => { - let top_collected_slot = slots_collector - .collect_slots( - &blockbuster::programs::bubblegum::ID, - finalized_slot, - parse_until, - &rx, - ) - .await; - if let Some(slot) = top_collected_slot { - parse_until = slot; - if let Err(e) = self - .rocks_client - .put_parameter(rocks_db::parameters::Parameter::LastFetchedSlot, slot) - .await - { - error!("Error while updating last fetched slot: {}", e); - } - } - } - Err(e) => { - error!("Error getting finalized slot: {}", e); - } - } - - let sleep = tokio::time::sleep(wait_period); - tokio::select! 
{ - _ = sleep => {}, - _ = rx.recv() => { - info!("Received stop signal, stopping perpetual slot parser"); - return Ok(()); - }, - } - } - } - - #[allow(clippy::too_many_arguments)] - pub async fn run_perpetual_slot_processing( - &self, - metrics: Arc, - slot_getter: Arc, - block_consumer: Arc, - block_producer: Arc

, - wait_period: Duration, - rx: Receiver<()>, - backup_provider: Option>, - ) -> Result<(), IngesterError> - where - C: BlockConsumer, - P: BlockProducer, - S: SlotGetter, - { - let transactions_parser = Arc::new(TransactionsParser::new( - self.rocks_client.clone(), - slot_getter, - block_consumer, - block_producer, - metrics.clone(), - self.workers_count, - self.chunk_size, - )); - - let mut rx = rx.resubscribe(); - while rx.is_empty() { - transactions_parser - .process_all_slots(rx.resubscribe(), backup_provider.clone()) - .await; - tokio::select! { - _ = tokio::time::sleep(wait_period) => {}, - _ = rx.recv() => { - info!("Received stop signal, returning from run_perpetual_slot_fetching"); - return Ok(()); - } - } - } - Ok(()) - } - - pub async fn start_backfill( - &self, - tasks: Arc>>>, - rx: tokio::sync::broadcast::Receiver<()>, - metrics: Arc, - block_consumer: Arc, - block_producer: Arc

, - ) -> Result<(), IngesterError> - where - C: BlockConsumer, - P: BlockProducer, - { - info!("Backfiller is started"); - - let slots_collector = SlotsCollector::new( - self.rocks_client.clone(), - self.slots_getter.clone(), - metrics.clone(), - ); - let start_from = self.slot_start_from; - let parse_until = self.slot_parse_until; - let rx1 = rx.resubscribe(); - let rx2 = rx.resubscribe(); - tasks.lock().await.spawn(async move { - info!("Running slots parser..."); - slots_collector - .collect_slots( - &blockbuster::programs::bubblegum::ID, - start_from, - parse_until, - &rx1, - ) - .await; - Ok(()) - }); - - let transactions_parser = Arc::new(TransactionsParser::new( - self.rocks_client.clone(), - Arc::new(BubblegumSlotGetter::new(self.rocks_client.clone())), - block_consumer, - block_producer, - metrics.clone(), - self.workers_count, - self.chunk_size, - )); - tasks.lock().await.spawn(async move { - info!("Running transactions parser..."); - - transactions_parser.parse_transactions(rx2).await; - Ok(()) - }); - - Ok(()) - } -} - #[derive(Clone)] pub struct TransactionsParser { rocks_client: Arc, @@ -397,189 +153,6 @@ where } } - pub async fn parse_raw_transactions( - &self, - rx: Receiver<()>, - permits: usize, - start_slot: Option, - ) { - let mut max_slot = 0; - let slots_to_parse_iter = match start_slot { - Some(slot) => self.rocks_client.raw_blocks_cbor.iter(slot), - None => self.rocks_client.raw_blocks_cbor.iter_start(), - }; - let cnt = AtomicU64::new(0); - let mut slots_to_parse_vec = Vec::new(); - let semaphore = Arc::new(tokio::sync::Semaphore::new(permits)); - let mut tasks = Vec::new(); - for next in slots_to_parse_iter { - if !rx.is_empty() { - tracing::info!("terminating transactions parser"); - break; - } - - let (key_box, _value_box) = match next { - Ok((key_box, _value_box)) => (key_box, _value_box), - Err(e) => { - tracing::error!("Error getting next slot: {}", e); - continue; - } - }; - - let key = match RawBlock::decode_key(key_box.to_vec()) { - 
Ok(key) => key, - Err(e) => { - tracing::error!("Error decoding key: {}", e); - continue; - } - }; - - if key > max_slot { - max_slot = key; - } - - slots_to_parse_vec.push(key); - if slots_to_parse_vec.len() >= self.workers_count * self.chunk_size { - let permit = semaphore.clone().acquire_owned().await.unwrap(); - let slots = slots_to_parse_vec.clone(); - let c = self.consumer.clone(); - let p = self.producer.clone(); - let m = self.metrics.clone(); - let chunk_size = self.chunk_size; - let task_number = cnt.fetch_add(1, Ordering::Relaxed); - let rx = rx.resubscribe(); - tasks.push(tokio::task::spawn(async move { - let _permit = permit; - tracing::info!( - "Started a task {}, parsing {} slots", - task_number, - slots.len() - ); - let none: Option> = None; - let res = Self::parse_slots( - c, - p, - m, - chunk_size, - slots.as_slice(), - rx.resubscribe(), - none, - ) - .await; - if let Err(err) = res { - error!("Error parsing slots: {}", err); - } - tracing::info!("Task {} finished", task_number); - })); - slots_to_parse_vec.clear(); - } - } - if !slots_to_parse_vec.is_empty() { - let permit = semaphore.clone().acquire_owned().await.unwrap(); - let slots = slots_to_parse_vec.clone(); - let c = self.consumer.clone(); - let p = self.producer.clone(); - let m = self.metrics.clone(); - let chunk_size = self.chunk_size; - let task_number = cnt.fetch_add(1, Ordering::Relaxed); - tasks.push(tokio::task::spawn(async move { - let _permit = permit; - tracing::info!( - "Started a task {}, parsing {} slots", - task_number, - slots.len() - ); - let none: Option> = None; - let res = Self::parse_slots( - c, - p, - m, - chunk_size, - slots.as_slice(), - rx.resubscribe(), - none, - ) - .await; - if let Err(err) = res { - error!("Error parsing slots: {}", err); - } - tracing::info!("Task {} finished", task_number); - })); - } - - join_all(tasks).await; - - if let Err(e) = self - .rocks_client - .put_parameter(rocks_db::parameters::Parameter::LastFetchedSlot, max_slot) - .await - { - 
error!("Error while updating last fetched slot: {}", e); - } - - tracing::info!("Transactions parser has finished working"); - } - - pub async fn process_all_slots( - &self, - rx: Receiver<()>, - backup_provider: Option>, - ) { - let slots_iter = self.slot_getter.get_unprocessed_slots_iter(); - let chunk_size = self.workers_count * self.chunk_size; - - let mut slots_batch = Vec::with_capacity(chunk_size); - - for slot in slots_iter { - if !rx.is_empty() { - info!("Received stop signal, returning from process_all_slots"); - return; - } - slots_batch.push(slot); - if slots_batch.len() >= chunk_size { - info!("Got {} slots to parse", slots_batch.len()); - let res = self - .process_slots( - slots_batch.as_slice(), - rx.resubscribe(), - backup_provider.clone(), - ) - .await; - match res { - Ok(processed) => { - info!("Processed {} slots", processed); - } - Err(err) => { - error!("Error processing slots: {}", err); - } - } - slots_batch.clear(); - } - } - if !rx.is_empty() { - info!("Received stop signal, returning"); - return; - } - if !slots_batch.is_empty() { - info!("Got {} slots to parse", slots_batch.len()); - let res = self - .process_slots( - slots_batch.as_slice(), - rx.resubscribe(), - backup_provider.clone(), - ) - .await; - match res { - Ok(processed) => { - info!("Processed {} slots", processed); - } - Err(err) => { - error!("Error processing slots: {}", err); - } - } - } - } - pub async fn parse_transactions(&self, rx: Receiver<()>) { 'outer: while rx.is_empty() { let mut slots_to_parse_iter = self.slot_getter.get_unprocessed_slots_iter(); @@ -603,7 +176,7 @@ where } } } - let none: Option> = None; + let none: Option> = None; let res = self .process_slots(slots_to_parse_vec.as_slice(), rx.resubscribe(), none) .await; @@ -768,6 +341,98 @@ where } } } +pub async fn run_backfill_slots( + shutdown_token: CancellationToken, + db: Arc, + slot_db: Arc, + consumer: Arc, + metrics: Arc, +) where + C: BlockConsumer, +{ + loop { + if shutdown_token.is_cancelled() { + 
info!("Shutdown signal received, stopping run_backfill_slots"); + break; + } + let sleep = tokio::time::sleep(Duration::from_millis(400)); + if let Err(e) = backfill_slots( + &shutdown_token, + db.clone(), + slot_db.clone(), + consumer.clone(), + metrics.clone(), + ) + .await + { + error!("Error while backfilling slots: {}", e); + } + tokio::select! { + _ = sleep => {} + _ = shutdown_token.cancelled() => { + info!("Shutdown signal received, stopping run_backfill_slots"); + break; + } + } + } +} + +pub async fn backfill_slots( + shutdown_token: &CancellationToken, + db: Arc, + slot_db: Arc, + consumer: Arc, + metrics: Arc, +) -> Result<(), IngesterError> +where + C: BlockConsumer, +{ + let start_slot = db + .get_parameter::(rocks_db::parameters::Parameter::LastBackfilledSlot) + .await?; + slot_db + .db + .try_catch_up_with_primary() + .map_err(|e| IngesterError::DatabaseError(e.to_string()))?; + let mut it = slot_db + .db + .raw_iterator_cf(&slot_db.db.cf_handle(RawBlock::NAME).unwrap()); + if let Some(start_slot) = start_slot { + it.seek(&RawBlock::encode_key(start_slot)); + } else { + it.seek_to_first(); + } + while it.valid() { + if shutdown_token.is_cancelled() { + info!("Shutdown signal received, stopping backfill_slots"); + break; + } + if let Some((key, raw_block_data)) = it.item() { + let slot = RawBlock::decode_key(key.to_vec())?; + // Process the slot + let raw_block: RawBlock = match serde_cbor::from_slice(raw_block_data) { + Ok(rb) => rb, + Err(e) => { + error!("Failed to decode the value for slot {}: {}", slot, e); + continue; + } + }; + let block_time = raw_block.block.block_time.clone(); + if let Err(e) = consumer.consume_block(slot, raw_block.block).await { + error!("Error processing slot {}: {}", slot, e); + } + if let Some(block_time) = block_time { + let dur = time::SystemTime::now() + .duration_since(time::UNIX_EPOCH + Duration::from_secs(block_time as u64)) + .unwrap_or_default() + .as_millis() as f64; + 
metrics.set_slot_delay_time("raw_slot_backfilled", dur); + } + } + it.next(); + } + Ok(()) +} #[async_trait] impl BlockConsumer for DirectBlockParser @@ -952,7 +617,6 @@ where slot: u64, block: solana_transaction_status::UiConfirmedBlock, ) -> Result<(), BlockConsumeError> { - self.rocks_client.consume_block(slot, block.clone()).await?; self.direct_block_parser.consume_block(slot, block).await } diff --git a/nft_ingester/src/bin/api/main.rs b/nft_ingester/src/bin/api/main.rs index b46607cf1..a44b09a22 100644 --- a/nft_ingester/src/bin/api/main.rs +++ b/nft_ingester/src/bin/api/main.rs @@ -119,6 +119,7 @@ pub async fn main() -> Result<(), IngesterError> { pg_client.clone(), rocks_storage.clone(), json_downloader_metrics.clone(), + red_metrics.clone(), ) .await, )) @@ -149,6 +150,7 @@ pub async fn main() -> Result<(), IngesterError> { cloned_rocks_storage.clone(), cloned_rx, metrics.clone(), + Some(red_metrics.clone()), config.server_port, proof_checker, tree_gaps_checker, @@ -164,6 +166,7 @@ pub async fn main() -> Result<(), IngesterError> { config.file_storage_path_container.as_str(), account_balance_getter, config.storage_service_base_url, + config.native_mint_pubkey, ) .await { @@ -177,34 +180,35 @@ pub async fn main() -> Result<(), IngesterError> { }); // setup dependencies for grpc server - let uc = usecase::asset_streamer::AssetStreamer::new( - config.peer_grpc_max_gap_slots, - rocks_storage.clone(), - ); - let bs = usecase::raw_blocks_streamer::BlocksStreamer::new( - config.peer_grpc_max_gap_slots, - rocks_storage.clone(), - ); - let serv = grpc::service::PeerGapFillerServiceImpl::new( - Arc::new(uc), - Arc::new(bs), - rocks_storage.clone(), - ); - let addr = format!("0.0.0.0:{}", config.peer_grpc_port).parse()?; - let mut cloned_rx = shutdown_rx.resubscribe(); - // Spawn the gRPC server task and add to JoinSet - mutexed_tasks.lock().await.spawn(async move { - if let Err(e) = Server::builder() - .add_service(GapFillerServiceServer::new(serv)) - 
.serve_with_shutdown(addr, async { - cloned_rx.recv().await.unwrap(); - }) - .await - { - eprintln!("GRPC Server error: {}", e); - } - Ok(()) - }); + // TODO: add slots db to API server configuration and use it here to create BlocksStreamer, enable the grpc again + // let uc = usecase::asset_streamer::AssetStreamer::new( + // config.peer_grpc_max_gap_slots, + // rocks_storage.clone(), + // ); + // let bs = usecase::raw_blocks_streamer::BlocksStreamer::new( + // config.peer_grpc_max_gap_slots, + // rocks_storage.clone(), + // ); + // let serv = grpc::service::PeerGapFillerServiceImpl::new( + // Arc::new(uc), + // Arc::new(bs), + // rocks_storage.clone(), + // ); + // let addr = format!("0.0.0.0:{}", config.peer_grpc_port).parse()?; + // let mut cloned_rx = shutdown_rx.resubscribe(); + // // Spawn the gRPC server task and add to JoinSet + // mutexed_tasks.lock().await.spawn(async move { + // if let Err(e) = Server::builder() + // .add_service(GapFillerServiceServer::new(serv)) + // .serve_with_shutdown(addr, async { + // cloned_rx.recv().await.unwrap(); + // }) + // .await + // { + // eprintln!("GRPC Server error: {}", e); + // } + // Ok(()) + // }); // try synchronizing secondary rocksdb instance every config.rocks_sync_interval_seconds let cloned_rx = shutdown_rx.resubscribe(); @@ -225,6 +229,7 @@ pub async fn main() -> Result<(), IngesterError> { graceful_stop( mutexed_tasks, shutdown_tx, + None, guard, config.profiling_file_path_container, &config.heap_path, diff --git a/nft_ingester/src/bin/backfill/main.rs b/nft_ingester/src/bin/backfill/main.rs new file mode 100644 index 000000000..26f0cfd11 --- /dev/null +++ b/nft_ingester/src/bin/backfill/main.rs @@ -0,0 +1,384 @@ +use std::collections::HashSet; +use std::path::PathBuf; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; + +use clap::Parser; +use entities::models::RawBlock; +use indicatif::{ProgressBar, ProgressStyle}; +use interface::signature_persistence::BlockConsumer; +use 
metrics_utils::BackfillerMetricsConfig; +use metrics_utils::{red::RequestErrorDurationMetrics, IngesterMetricsConfig}; +use nft_ingester::{ + backfiller::DirectBlockParser, + processors::transaction_based::bubblegum_updates_processor::BubblegumTxProcessor, + transaction_ingester, +}; +use rocks_db::migrator::MigrationState; +use rocks_db::SlotStorage; +use rocks_db::{column::TypedColumn, Storage}; +use tokio_util::sync::CancellationToken; +use tracing::{error, info, warn}; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + /// Path to the source RocksDB with slots (readonly) + #[arg(short, long)] + source_db_path: PathBuf, + + /// Path to the target RocksDB instance + #[arg(short, long)] + target_db_path: PathBuf, + + /// Optional starting slot number + #[arg(short, long)] + start_slot: Option, + + /// Number of concurrent workers (default: 16) + #[arg(short = 'w', long, default_value_t = 16)] + workers: usize, + + /// Optional comma-separated list of slot numbers to process + #[arg(short = 's', long)] + slots: Option, +} + +#[tokio::main] +async fn main() { + // Initialize tracing subscriber for logging + tracing_subscriber::fmt::init(); + + let args = Args::parse(); + + // Open source RocksDB in readonly mode + let source_db = + Storage::open_readonly_with_cfs_only_db(&args.source_db_path, SlotStorage::cf_names()) + .expect("Failed to open source RocksDB"); + let red_metrics = Arc::new(RequestErrorDurationMetrics::new()); + + // Open target RocksDB + let target_db = Arc::new( + Storage::open( + &args.target_db_path, + Arc::new(tokio::sync::Mutex::new(tokio::task::JoinSet::new())), + red_metrics.clone(), + MigrationState::Last, + ) + .expect("Failed to open target RocksDB"), + ); + + // Initialize the DirectBlockParser + let ingester_metrics = Arc::new(IngesterMetricsConfig::new()); + let metrics = Arc::new(BackfillerMetricsConfig::new()); + let bubblegum_updates_processor = Arc::new(BubblegumTxProcessor::new( + 
target_db.clone(), + ingester_metrics.clone(), + )); + + let tx_ingester = Arc::new(transaction_ingester::BackfillTransactionIngester::new( + bubblegum_updates_processor.clone(), + )); + + let consumer = Arc::new(DirectBlockParser::new( + tx_ingester.clone(), + target_db.clone(), + metrics.clone(), + )); + + // Concurrency setup + let num_workers = args.workers; + let (slot_sender, slot_receiver) = async_channel::bounded::<(u64, Vec)>(num_workers * 2); + let slots_processed = Arc::new(AtomicU64::new(0)); + let rate = Arc::new(Mutex::new(0.0)); + + // Spawn a task to handle graceful shutdown on Ctrl+C + let shutdown_token = CancellationToken::new(); + let shutdown_token_clone = shutdown_token.clone(); + + let slot_sender_clone = slot_sender.clone(); + tokio::spawn(async move { + // Wait for Ctrl+C signal + match tokio::signal::ctrl_c().await { + Ok(()) => { + info!("Received Ctrl+C, shutting down gracefully..."); + shutdown_token_clone.cancel(); + // Close the channel to signal workers to stop + slot_sender_clone.close(); + } + Err(err) => { + error!("Unable to listen for shutdown signal: {}", err); + } + } + }); + + // Parse slots if provided + let mut slots_to_process = Vec::new(); + if let Some(slots_str) = args.slots { + info!("Processing specific slots provided via command line."); + for part in slots_str.split(',') { + let slot_str = part.trim(); + if let Ok(slot) = slot_str.parse::() { + slots_to_process.push(slot); + } else { + warn!("Invalid slot number provided: {}", slot_str); + } + } + + // Remove duplicates and sort slots + let mut slots_set = HashSet::new(); + slots_to_process = slots_to_process + .into_iter() + .filter(|x| slots_set.insert(*x)) + .collect(); + slots_to_process.sort_unstable(); + + if slots_to_process.is_empty() { + error!("No valid slots to process. 
Exiting."); + return; + } + + info!("Total slots to process: {}", slots_to_process.len()); + } + + // Set up progress bar + let total_slots = if !slots_to_process.is_empty() { + slots_to_process.len() as u64 + } else { + // Get the last slot + let mut iter = source_db.raw_iterator_cf(&source_db.cf_handle(RawBlock::NAME).unwrap()); + iter.seek_to_last(); + if !iter.valid() { + error!("Failed to seek to last slot"); + return; + } + let last_slot = iter + .key() + .map(|k| u64::from_be_bytes(k.try_into().expect("Failed to decode the last slot key"))) + .expect("Failed to get the last slot"); + + // Determine the starting slot + let start_slot = if let Some(start_slot) = args.start_slot { + info!("Starting from slot: {}", start_slot); + start_slot + } else { + iter.seek_to_first(); + iter.key() + .map(|k| { + u64::from_be_bytes(k.try_into().expect("Failed to decode the start slot key")) + }) + .expect("Failed to get the start slot") + }; + + info!("Start slot: {}, Last slot: {}", start_slot, last_slot); + last_slot - start_slot + 1 + }; + + let progress_bar = Arc::new(ProgressBar::new(total_slots)); + progress_bar.set_style( + ProgressStyle::default_bar() + .template( + "{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {percent}% \ + ({pos}/{len}) {msg}", + ) + .expect("Failed to set progress bar style") + .progress_chars("#>-"), + ); + + // Spawn worker tasks + let mut worker_handles = Vec::new(); + for _ in 0..num_workers { + let consumer = consumer.clone(); + let progress_bar = progress_bar.clone(); + let slots_processed = slots_processed.clone(); + let rate = rate.clone(); + let shutdown_token = shutdown_token.clone(); + + let slot_receiver = slot_receiver.clone(); + + let handle = tokio::spawn(async move { + while let Ok((slot, raw_block_data)) = slot_receiver.recv().await { + if shutdown_token.is_cancelled() { + break; + } + + // Process the slot + let raw_block: RawBlock = match serde_cbor::from_slice(&raw_block_data) { + Ok(rb) => rb, + Err(e) => { + 
error!("Failed to decode the value for slot {}: {}", slot, e); + continue; + } + }; + + if let Err(e) = consumer.consume_block(slot, raw_block.block).await { + error!("Error processing slot {}: {}", slot, e); + } + + // Increment slots_processed + let current_slots_processed = slots_processed.fetch_add(1, Ordering::Relaxed) + 1; + + // Update progress bar position and message + progress_bar.inc(1); + + let current_rate = { + let rate_guard = rate.lock().unwrap(); + *rate_guard + }; + progress_bar.set_message(format!( + "Slots Processed: {} Current Slot: {} Rate: {:.2}/s", + current_slots_processed, slot, current_rate + )); + } + }); + + worker_handles.push(handle); + } + + // Spawn a task to update the rate periodically + let slots_processed_clone = slots_processed.clone(); + let rate_clone = rate.clone(); + let shutdown_token_clone = shutdown_token.clone(); + + tokio::spawn(async move { + let mut last_time = std::time::Instant::now(); + let mut last_count = slots_processed_clone.load(Ordering::Relaxed); + + loop { + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + if shutdown_token_clone.is_cancelled() { + break; + } + + let current_time = std::time::Instant::now(); + let current_count = slots_processed_clone.load(Ordering::Relaxed); + + let elapsed = current_time.duration_since(last_time).as_secs_f64(); + let count = current_count - last_count; + + let current_rate = if elapsed > 0.0 { + (count as f64) / elapsed + } else { + 0.0 + }; + + // Update rate + { + let mut rate_guard = rate_clone.lock().unwrap(); + *rate_guard = current_rate; + } + + // Update for next iteration + last_time = current_time; + last_count = current_count; + } + }); + + // Send slots to the channel + if !slots_to_process.is_empty() { + // Process only the specified slots + send_slots_to_workers( + slots_to_process, + source_db, + slot_sender.clone(), + shutdown_token.clone(), + ) + .await; + } else { + // Process all slots from start_slot + send_all_slots_to_workers( + 
source_db, + slot_sender.clone(), + shutdown_token.clone(), + args.start_slot, + ) + .await; + } + + // Close the sender to signal that no more items will be sent + slot_sender.close(); + + // Wait for workers to finish + for handle in worker_handles { + let _ = handle.await; + } + + progress_bar.finish_with_message("Processing complete"); +} + +// Function to send specified slots to workers +async fn send_slots_to_workers( + slots_to_process: Vec, + source_db: rocksdb::DB, + slot_sender: async_channel::Sender<(u64, Vec)>, + shutdown_token: CancellationToken, +) { + let cf_handle = source_db.cf_handle(RawBlock::NAME).unwrap(); + + for slot in slots_to_process { + if shutdown_token.is_cancelled() { + info!("Shutdown signal received. Stopping the submission of new slots."); + break; + } + + let key = RawBlock::encode_key(slot); + match source_db.get_pinned_cf(&cf_handle, key) { + Ok(Some(value)) => { + let raw_block_data = value.to_vec(); + if slot_sender.send((slot, raw_block_data)).await.is_err() { + error!("Failed to send slot {} to workers", slot); + break; + } + } + Ok(None) => { + warn!("Slot {} not found in source database", slot); + } + Err(e) => { + error!("Error fetching slot {}: {}", slot, e); + } + } + } +} + +// Function to send all slots starting from start_slot to workers +async fn send_all_slots_to_workers( + source_db: rocksdb::DB, + slot_sender: async_channel::Sender<(u64, Vec)>, + shutdown_token: CancellationToken, + start_slot: Option, +) { + let cf_handle = source_db.cf_handle(RawBlock::NAME).unwrap(); + let mut iter = source_db.raw_iterator_cf(&cf_handle); + + // Determine starting point + if let Some(start_slot) = start_slot { + iter.seek(RawBlock::encode_key(start_slot)); + } else { + iter.seek_to_first(); + } + + // Send slots to the channel + while iter.valid() { + if shutdown_token.is_cancelled() { + info!("Shutdown signal received. 
Stopping the submission of new slots."); + break; + } + + if let Some((key, value)) = iter.item() { + let slot = u64::from_be_bytes(key.try_into().expect("Failed to decode the slot key")); + let raw_block_data = value.to_vec(); + + // Send the slot and data to the channel + if slot_sender.send((slot, raw_block_data)).await.is_err() { + error!("Failed to send slot {} to workers", slot); + break; + } + + // Move to the next slot + iter.next(); + } else { + break; + } + } +} diff --git a/nft_ingester/src/bin/explorer/main.rs b/nft_ingester/src/bin/explorer/main.rs new file mode 100644 index 000000000..8c90e5e59 --- /dev/null +++ b/nft_ingester/src/bin/explorer/main.rs @@ -0,0 +1,321 @@ +use axum::{ + extract::{Extension, Query}, + http::StatusCode, + routing::get, + Json, Router, +}; +use clap::Parser; +use itertools::Itertools; +use metrics_utils::ApiMetricsConfig; +use prometheus_client::registry::Registry; +use rocks_db::Storage; +use rocks_db::{asset, migrator::MigrationState}; +use rocksdb::{ColumnFamilyDescriptor, Options, DB}; +use serde::Deserialize; +use std::net::SocketAddr; +use std::sync::Arc; +use tempfile::TempDir; +use tokio::sync::Mutex; +use tokio::task::JoinSet; + +#[derive(Parser)] +struct Config { + /// Primary DB path + #[clap(short('d'), long)] + primary_db_path: String, + + /// Secondary DB path (optional) + #[clap(short, long)] + secondary_db_path: Option, + + /// Port (defaults to 8086) + #[clap(short, long, default_value = "8086")] + port: u16, +} + +struct AppState { + db: Arc, +} + +#[derive(Deserialize)] +struct IterateKeysParams { + cf_name: String, + limit: Option, + start_key: Option, +} + +#[derive(Deserialize)] +struct IterateKeysPatternParams { + cf_name: String, + pattern: String, + limit: Option, +} + +#[derive(Deserialize)] +struct GetValueParams { + cf_name: String, + key: String, +} + +#[tokio::main] +async fn main() { + // Parse command-line arguments + let config = Config::parse(); + + // Handle secondary DB path + let 
temp_dir; + let secondary_db_path = if let Some(ref path) = config.secondary_db_path { + path.clone() + } else { + temp_dir = TempDir::new().expect("Failed to create temp directory"); + temp_dir.path().to_str().unwrap().to_string() + }; + + let mut registry = Registry::default(); + let metrics = Arc::new(ApiMetricsConfig::new()); + metrics.register(&mut registry); + let red_metrics = Arc::new(metrics_utils::red::RequestErrorDurationMetrics::new()); + red_metrics.register(&mut registry); + + let options = Options::default(); + + let cf_names = + DB::list_cf(&options, &config.primary_db_path).expect("Failed to list column families."); + + let cfs: Vec = cf_names + .into_iter() + .map(|name| { + let mut cf_options = Options::default(); + cf_options.set_merge_operator_associative( + &format!("merge_fn_merge_{}", &name), + asset::AssetStaticDetails::merge_keep_existing, + ); + ColumnFamilyDescriptor::new(&name, cf_options) + }) + .collect(); + + let db = DB::open_cf_descriptors_as_secondary( + &options, + &config.primary_db_path, + &secondary_db_path, + cfs, + ) + .expect("Failed to open DB."); + + // Open the primary RocksDB database + let db = Arc::new(db); + + let app_state = AppState { db }; + + // Build our application with the routes + let app = Router::new() + .route("/iterate_keys", get(iterate_keys)) + .route("/iterate_keys_with_pattern", get(iterate_keys_with_pattern)) + .route("/get_value", get(get_value)) + .layer(Extension(Arc::new(app_state))); + + // Run our app with hyper + let addr = SocketAddr::from(([0, 0, 0, 0], config.port)); + println!("Listening on {}", addr); + axum::Server::bind(&addr) + .serve(app.into_make_service()) + .await + .unwrap(); +} + +async fn iterate_keys( + Extension(state): Extension>, + Query(params): Query, +) -> Result>, (StatusCode, String)> { + let db = &state.db; + + // Extract parameters + let cf_name = ¶ms.cf_name; + let limit = params.limit.unwrap_or(10); // Default limit if not provided + + // Decode start_key if 
provided + let start_key = if let Some(ref s) = params.start_key { + match bs58::decode(s).into_vec() { + Ok(bytes) => Some(bytes), + Err(_) => { + return Err(( + StatusCode::BAD_REQUEST, + "Invalid Base58 start_key".to_string(), + )) + } + } + } else { + None + }; + + // Call the iterate_keys function + match iterate_keys_function(db, cf_name, start_key.as_deref(), limit) { + Ok(keys) => Ok(Json(keys)), + Err(err_msg) => Err((StatusCode::INTERNAL_SERVER_ERROR, err_msg)), + } +} + +async fn iterate_keys_with_pattern( + Extension(state): Extension>, + Query(params): Query, +) -> Result>, (StatusCode, String)> { + let db = &state.db; + + // Extract parameters + let cf_name = ¶ms.cf_name; + let limit = params.limit.unwrap_or(10); // Default limit if not provided + + // Decode the pattern from Base58 + let pattern_bytes = match bs58::decode(¶ms.pattern).into_vec() { + Ok(bytes) => bytes, + Err(_) => { + return Err(( + StatusCode::BAD_REQUEST, + "Invalid Base58 pattern".to_string(), + )) + } + }; + + // Call the iterate_keys_with_pattern function + match iterate_keys_with_pattern_function(db, cf_name, &pattern_bytes, limit) { + Ok(keys) => Ok(Json(keys)), + Err(err_msg) => Err((StatusCode::INTERNAL_SERVER_ERROR, err_msg)), + } +} + +async fn get_value( + Extension(state): Extension>, + Query(params): Query, +) -> Result, (StatusCode, String)> { + let db = &state.db; + + // Extract parameters + let cf_name = ¶ms.cf_name; + + // Decode the key from Base58 + let key_bytes = match bs58::decode(¶ms.key).into_vec() { + Ok(bytes) => bytes, + Err(_) => return Err((StatusCode::BAD_REQUEST, "Invalid Base58 key".to_string())), + }; + + // Call the get_value function + match get_value_function(db, cf_name, &key_bytes) { + Ok(Some(value)) => Ok(Json(value)), + Ok(None) => Err((StatusCode::NOT_FOUND, "Key not found".to_string())), + Err(err_msg) => Err((StatusCode::INTERNAL_SERVER_ERROR, err_msg)), + } +} + +/// Iterates over keys in a specified RocksDB column family, starting from 
an optional key, +/// and returns up to `limit` Base58-encoded keys. +/// +/// # Parameters +/// +/// - `db`: Reference to the RocksDB database. +/// - `cf_name`: The name of the column family to iterate over. +/// - `start_key`: Optional starting key to begin iteration. +/// - `limit`: Maximum number of keys to return. +/// +/// # Returns +/// +/// A `Result` containing a vector of Base58-encoded keys or an error message. +fn iterate_keys_function( + db: &DB, + cf_name: &str, + start_key: Option<&[u8]>, + limit: usize, +) -> Result, String> { + // Get the column family handle + let cf_handle = db + .cf_handle(cf_name) + .ok_or_else(|| "Column family not found".to_string())?; + + // Create an iterator with the specified starting point + let iter_mode = match start_key { + Some(key) => rocksdb::IteratorMode::From(key, rocksdb::Direction::Forward), + None => rocksdb::IteratorMode::Start, + }; + + let iterator = db.iterator_cf(&cf_handle, iter_mode); + + // Collect keys up to the specified limit + let keys: Vec = iterator + .take(limit) + .filter_map(Result::ok) + .map(|(key, _)| bs58::encode(key).into_string()) + .collect(); + + Ok(keys) +} + +/// Retrieves the value for a given key from a specified RocksDB column family, +/// and returns it as a Base58-encoded string. +/// +/// # Parameters +/// +/// - `db`: Reference to the RocksDB database. +/// - `cf_name`: The name of the column family. +/// - `key`: The key to retrieve the value for. +/// +/// # Returns +/// +/// A `Result` containing an `Option` with the Base58-encoded value if the key exists, +/// or an error message. 
+fn get_value_function(db: &DB, cf_name: &str, key: &[u8]) -> Result, String> { + // Get the column family handle + let cf_handle = db + .cf_handle(cf_name) + .ok_or_else(|| "Column family not found".to_string())?; + + // Retrieve the value for the key + match db.get_cf(&cf_handle, key) { + Ok(Some(value)) => Ok(Some(bs58::encode(value).into_string())), + Ok(None) => Ok(None), + Err(e) => Err(format!("DB error: {}", e)), + } +} + +/// Iterates over keys in a specified RocksDB column family, +/// filtering keys that include a given byte pattern, +/// and returns up to `limit` Base58-encoded keys. +/// +/// # Parameters +/// +/// - `db`: Reference to the RocksDB database. +/// - `cf_name`: The name of the column family to iterate over. +/// - `pattern`: Byte pattern to match within the keys. +/// - `limit`: Maximum number of keys to return. +/// +/// # Returns +/// +/// A `Result` containing a vector of Base58-encoded keys or an error message. +fn iterate_keys_with_pattern_function( + db: &DB, + cf_name: &str, + pattern: &[u8], + limit: usize, +) -> Result, String> { + // Get the column family handle + let cf_handle = &db + .cf_handle(cf_name) + .ok_or_else(|| "Column family not found".to_string())?; + + // Create an iterator starting from the beginning + let iter_mode = rocksdb::IteratorMode::Start; + let iterator = db.iterator_cf(cf_handle, iter_mode); + + // Collect keys up to the specified limit that match the pattern + let keys: Vec = iterator + .filter_map(Result::ok) + .filter_map(|(key, _)| { + if key.windows(pattern.len()).any(|window| window == pattern) { + Some(bs58::encode(key).into_string()) + } else { + None + } + }) + .take(limit) + .collect(); + + Ok(keys) +} diff --git a/nft_ingester/src/bin/explorer/readme.md b/nft_ingester/src/bin/explorer/readme.md new file mode 100644 index 000000000..b75cc58aa --- /dev/null +++ b/nft_ingester/src/bin/explorer/readme.md @@ -0,0 +1,120 @@ +# Explorer + +Explorer is a small RESTful service built in Rust that allows 
you to interact with a RocksDB database. It provides endpoints to: + +- **Iterate over keys** in a specified column family. +- **Retrieve the value** for a specific key. + +Both keys and values are Base58-encoded for safe transmission over HTTP. + +## Table of Contents + +- [Explorer](#explorer) + - [Table of Contents](#table-of-contents) + - [Features](#features) + - [Prerequisites](#prerequisites) + - [Building the Project](#building-the-project) + - [Running the Service](#running-the-service) + - [Optional Arguments](#optional-arguments) + - [Example with all arguments:](#example-with-all-arguments) + - [Configuration](#configuration) + - [API Endpoints](#api-endpoints) + - [Iterate Keys](#iterate-keys) + - [Example Request:](#example-request) + - [Get Value](#get-value) + - [Example Request:](#example-request-1) + - [Acknowledgements](#acknowledgements) + +## Features + +- **Iterate Keys**: Retrieve a list of Base58-encoded keys from a specified column family in RocksDB. +- **Get Value**: Retrieve the Base58-encoded value for a given key. +- **Command-line Configuration**: Specify database paths and server port via command-line arguments. +- **RESTful API**: Accessible via HTTP requests using `curl` or any HTTP client. + +## Prerequisites + +- **Rust**: You need to have Rust installed. You can install Rust using [rustup](https://www.rust-lang.org/tools/install). +- **Cargo**: Cargo is the Rust package manager, which comes with Rust installation. +- **RocksDB Database**: A RocksDB database that you want to interact with. 
+ +## Building the Project + +Clone the repository and navigate to the project directory: + +```bash +git clone https://github.com/metaplex-foundation/aura.git +cd nft_ingester +``` + +Build the project using Cargo: + +```bash +cargo build --release --bin explorer +``` + +## Running the Service + +Run the compiled binary with the required arguments: + +```bash +../target/release/explorer --primary-db-path /path/to/your/primary/db +``` +### Optional Arguments + +• --secondary-db-path: Path to the secondary RocksDB database. If not provided, a temporary directory is used. +• --port: Port number for the REST service (defaults to 8086). + + +### Example with all arguments: + +```bash +./target/release/explorer \ + --primary-db-path /path/to/your/primary/db \ + --secondary-db-path /path/to/your/secondary/db \ + --port 8086 +``` + +## Configuration + +The service accepts the following command-line arguments: + +• --primary-db-path (**required**): Path to the primary RocksDB database. +• --secondary-db-path (optional): Path to the secondary RocksDB database. +• --port (optional): Port number for the REST service (default is 8086). + +## API Endpoints + +### Iterate Keys + +• URL: /iterate_keys +• Method: GET +• Query Parameters: +• cf_name (required): Name of the column family. +• limit (optional): Maximum number of keys to return (default is 10). +• start_key (optional): Base58-encoded key to start iteration from. + +#### Example Request: +```bash +curl "http://localhost:8086/iterate_keys?cf_name=default&limit=5" +``` + +### Get Value + +• URL: /get_value +• Method: GET +• Query Parameters: +• cf_name (required): Name of the column family. +• key (required): Base58-encoded key whose value is to be retrieved. + +#### Example Request: +```bash +curl "http://localhost:8086/get_value?cf_name=default&key=3vQB7B6MrGQZaxCuFg4oh" +``` + +## Acknowledgements + +• [RocksDB](https://rocksdb.org) for the high-performance key-value database. 
+• [Axum](https://github.com/tokio-rs/axum) for the ergonomic and modular web framework. +• [bs58](https://docs.rs/bs58/) crate for Base58 encoding and decoding. +• [Clap](https://clap.rs/) for command-line argument parsing. \ No newline at end of file diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index bd8d844e4..468069201 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -11,6 +11,7 @@ use std::str::FromStr; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use std::time::Duration; +use tokio_util::sync::CancellationToken; use clap::Parser; use futures::FutureExt; @@ -19,7 +20,6 @@ use grpc::gapfiller::gap_filler_service_server::GapFillerServiceServer; use nft_ingester::json_worker; use plerkle_messenger::ConsumptionType; use pprof::ProfilerGuardBuilder; -use rocks_db::bubblegum_slots::{BubblegumSlotGetter, IngestableSlotGetter}; use solana_client::nonblocking::rpc_client::RpcClient; use tokio::sync::{broadcast, Mutex}; use tokio::task::JoinSet; @@ -36,8 +36,7 @@ use nft_ingester::ack::create_ack_channel; use nft_ingester::api::account_balance::AccountBalanceGetterImpl; use nft_ingester::api::service::start_api; use nft_ingester::backfiller::{ - run_perpetual_slot_collection, run_perpetual_slot_processing, run_slot_force_persister, BackfillSource, Backfiller, - DirectBlockParser, ForceReingestableSlotGetter, TransactionsParser, + run_slot_force_persister, BackfillSource, DirectBlockParser, ForceReingestableSlotGetter, TransactionsParser, }; use nft_ingester::batch_mint::batch_mint_processor::{process_batch_mints, BatchMintProcessor, NoopBatchMintTxSender}; use nft_ingester::buffer::{debug_buffer, Buffer}; @@ -46,7 +45,6 @@ use nft_ingester::config::{ setup_config, ApiConfig, BackfillerConfig, BackfillerMode, IngesterConfig, MessageSource, INGESTER_CONFIG_PREFIX, }; use nft_ingester::gapfiller::{process_asset_details_stream_wrapper, 
run_sequence_consistent_gapfiller}; -use nft_ingester::index_syncronizer::Synchronizer; use nft_ingester::init::{graceful_stop, init_index_storage_with_migration, init_primary_storage}; use nft_ingester::json_worker::JsonWorker; use nft_ingester::message_handler::MessageHandlerIngester; @@ -58,9 +56,9 @@ use nft_ingester::rocks_db::{perform_backup, receive_last_saved_slot, restore_ro use nft_ingester::tcp_receiver::{connect_to_geyser, connect_to_snapshot_receiver, TcpReceiver}; use nft_ingester::transaction_ingester::BackfillTransactionIngester; use nft_ingester::{config::init_logger, error::IngesterError}; -use rocks_db::backup_service; use rocks_db::backup_service::BackupService; use rocks_db::storage_traits::AssetSlotStorage; +use rocks_db::{backup_service, SlotStorage}; use tonic::transport::Server; use usecase::asset_streamer::AssetStreamer; use usecase::proofs::MaybeProofChecker; @@ -79,17 +77,27 @@ pub const DEFAULT_MAX_POSTGRES_CONNECTIONS: u32 = 100; pub const SECONDS_TO_RETRY_IDXS_CLEANUP: u64 = 15 * 60; // 15 minutes #[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] struct Args { #[arg(short, long)] restore_rocks_db: bool, + + /// Path to the RocksDB instance with slots + #[arg(short, long)] + slots_db_path: PathBuf, + + /// Path to the secondary RocksDB instance with slots + #[arg(long)] + secondary_slots_db_path: PathBuf, } #[tokio::main(flavor = "multi_thread")] pub async fn main() -> Result<(), IngesterError> { - info!("Starting Ingester..."); + let args = Args::parse(); let config = setup_config::(INGESTER_CONFIG_PREFIX); init_logger(&config.get_log_level()); + info!("Starting Ingester..."); let mut metrics_state = MetricState::new(); metrics_state.register_metrics(); @@ -101,11 +109,10 @@ pub async fn main() -> Result<(), IngesterError> { }); // try to restore rocksDB first - if Args::parse().restore_rocks_db { + if args.restore_rocks_db { restore_rocksdb(&config).await?; } - let buffer = Arc::new(Buffer::new()); let 
mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); @@ -125,87 +132,11 @@ pub async fn main() -> Result<(), IngesterError> { .await?, ); - let synchronizer = Arc::new(Synchronizer::new( - primary_rocks_storage.clone(), - index_pg_storage.clone(), - index_pg_storage.clone(), - config.dump_synchronizer_batch_size, - config.dump_path.to_string(), - metrics_state.synchronizer_metrics.clone(), - config.synchronizer_parallel_tasks, - config.run_temp_sync_during_dump, - )); - - if config.run_dump_synchronize_on_start { - info!("Running dump synchronizer on start!"); - for asset_type in ASSET_TYPES { - let synchronizer = synchronizer.clone(); - let shutdown_rx = shutdown_rx.resubscribe(); - - mutexed_tasks.lock().await.spawn(async move { - if let Err(e) = synchronizer - .full_syncronize(&shutdown_rx.resubscribe(), asset_type) - .await - { - error!("Failed to syncronize on {:?} with error {}", asset_type, e); - panic!("Failed to syncronize on {:?} with error {}", asset_type, e); - } - - Ok(()) - }); - } - } - while let Some(res) = mutexed_tasks.lock().await.join_next().await { - match res { - Ok(_) => {} - Err(err) if err.is_panic() => panic::resume_unwind(err.into_panic()), - Err(err) => panic!("{err}"), - } - } - - // setup receiver - let message_handler = Arc::new(MessageHandlerIngester::new(buffer.clone())); - let (geyser_tcp_receiver, geyser_addr) = ( - TcpReceiver::new(message_handler.clone(), config.tcp_config.get_tcp_receiver_reconnect_interval()?), - config.tcp_config.get_tcp_receiver_addr_ingester()?, - ); - // For now there is no snapshot mechanism via Redis, so we use snapshot_tcp_receiver for this purpose - let (snapshot_tcp_receiver, snapshot_addr) = ( - TcpReceiver::new(message_handler.clone(), config.tcp_config.get_tcp_receiver_reconnect_interval()? 
* 2), - config.tcp_config.get_snapshot_addr_ingester()?, - ); - - let cloned_rx = shutdown_rx.resubscribe(); - let ack_channel = create_ack_channel(cloned_rx, config.redis_messenger_config.clone(), mutexed_tasks.clone()).await; - - let cloned_rx = shutdown_rx.resubscribe(); - if config.message_source == MessageSource::TCP { - mutexed_tasks - .lock() - .await - .spawn(connect_to_geyser(geyser_tcp_receiver, geyser_addr, cloned_rx)); - } - - let cloned_rx = shutdown_rx.resubscribe(); - mutexed_tasks - .lock() - .await - .spawn(connect_to_snapshot_receiver(snapshot_tcp_receiver, snapshot_addr, cloned_rx)); - - let cloned_buffer = buffer.clone(); - let cloned_rx = shutdown_rx.resubscribe(); - let cloned_metrics = metrics_state.ingester_metrics.clone(); - if config.message_source == MessageSource::TCP { - mutexed_tasks - .lock() - .await - .spawn(debug_buffer(cloned_rx, cloned_buffer, cloned_metrics)); - } - + // todo: remove backup service from here and move it to a separate process with a secondary db - verify it's possible first! 
// start backup service - let backup_service = BackupService::new(primary_rocks_storage.db.clone(), &backup_service::load_config()?)?; - let cloned_metrics = metrics_state.ingester_metrics.clone(); if config.store_db_backups() { + let backup_service = BackupService::new(primary_rocks_storage.db.clone(), &backup_service::load_config()?)?; + let cloned_metrics = metrics_state.ingester_metrics.clone(); let cloned_rx = shutdown_rx.resubscribe(); mutexed_tasks .lock() @@ -213,34 +144,62 @@ pub async fn main() -> Result<(), IngesterError> { .spawn(perform_backup(backup_service, cloned_rx, cloned_metrics)); } + let geyser_bubblegum_updates_processor = + Arc::new(BubblegumTxProcessor::new(primary_rocks_storage.clone(), metrics_state.ingester_metrics.clone())); let rpc_client = Arc::new(RpcClient::new(config.rpc_host.clone())); - for _ in 0..config.accounts_parsing_workers { - match config.message_source { - MessageSource::Redis => { - let redis_receiver = Arc::new( - RedisReceiver::new( - config.redis_messenger_config.clone(), - ConsumptionType::All, - ack_channel.clone(), - ) - .await?, - ); + + match config.message_source { + MessageSource::TCP => { + let buffer = Arc::new(Buffer::new()); + // setup receiver + let message_handler = Arc::new(MessageHandlerIngester::new(buffer.clone())); + let (geyser_tcp_receiver, geyser_addr) = ( + TcpReceiver::new(message_handler.clone(), config.tcp_config.get_tcp_receiver_reconnect_interval()?), + config.tcp_config.get_tcp_receiver_addr_ingester()?, + ); + let cloned_rx = shutdown_rx.resubscribe(); + mutexed_tasks + .lock() + .await + .spawn(connect_to_geyser(geyser_tcp_receiver, geyser_addr, cloned_rx)); + let (snapshot_tcp_receiver, snapshot_addr) = ( + TcpReceiver::new(message_handler.clone(), config.tcp_config.get_tcp_receiver_reconnect_interval()? 
* 2), + config.tcp_config.get_snapshot_addr_ingester()?, + ); + let cloned_rx = shutdown_rx.resubscribe(); + mutexed_tasks.lock().await.spawn(connect_to_snapshot_receiver( + snapshot_tcp_receiver, + snapshot_addr, + cloned_rx, + )); + + let cloned_buffer = buffer.clone(); + let cloned_rx = shutdown_rx.resubscribe(); + let cloned_metrics = metrics_state.ingester_metrics.clone(); + mutexed_tasks + .lock() + .await + .spawn(debug_buffer(cloned_rx, cloned_buffer, cloned_metrics)); + + // Workers for snapshot parsing + for _ in 0..config.snapshot_parsing_workers { run_accounts_processor( shutdown_rx.resubscribe(), mutexed_tasks.clone(), - redis_receiver, + buffer.clone(), primary_rocks_storage.clone(), - config.accounts_buffer_size, + config.snapshot_parsing_batch_size, config.mpl_core_fees_buffer_size, metrics_state.ingester_metrics.clone(), - Some(metrics_state.message_process_metrics.clone()), + // during snapshot parsing we don't want to collect message process metrics + None, index_pg_storage.clone(), rpc_client.clone(), mutexed_tasks.clone(), ) .await; } - MessageSource::TCP => { + for _ in 0..config.accounts_parsing_workers { run_accounts_processor( shutdown_rx.resubscribe(), mutexed_tasks.clone(), @@ -257,26 +216,65 @@ pub async fn main() -> Result<(), IngesterError> { ) .await; } + + run_transaction_processor( + shutdown_rx.resubscribe(), + mutexed_tasks.clone(), + buffer.clone(), + geyser_bubblegum_updates_processor.clone(), + // TCP sender does not send any ids with timestamps so we may not pass message process metrics here + None, + ) + .await; } - } + MessageSource::Redis => { + let cloned_rx = shutdown_rx.resubscribe(); + let ack_channel = + create_ack_channel(cloned_rx, config.redis_messenger_config.clone(), mutexed_tasks.clone()).await; - // Workers for snapshot parsing - for _ in 0..config.snapshot_parsing_workers { - run_accounts_processor( - shutdown_rx.resubscribe(), - mutexed_tasks.clone(), - buffer.clone(), - primary_rocks_storage.clone(), - 
config.snapshot_parsing_batch_size, - config.mpl_core_fees_buffer_size, - metrics_state.ingester_metrics.clone(), - // during snapshot parsing we don't want to collect message process metrics - None, - index_pg_storage.clone(), - rpc_client.clone(), - mutexed_tasks.clone(), - ) - .await; + for _ in 0..config.accounts_parsing_workers { + let redis_receiver = Arc::new( + RedisReceiver::new( + config.redis_messenger_config.clone(), + ConsumptionType::All, + ack_channel.clone(), + ) + .await?, + ); + run_accounts_processor( + shutdown_rx.resubscribe(), + mutexed_tasks.clone(), + redis_receiver, + primary_rocks_storage.clone(), + config.accounts_buffer_size, + config.mpl_core_fees_buffer_size, + metrics_state.ingester_metrics.clone(), + Some(metrics_state.message_process_metrics.clone()), + index_pg_storage.clone(), + rpc_client.clone(), + mutexed_tasks.clone(), + ) + .await; + } + for _ in 0..config.transactions_parsing_workers { + let redis_receiver = Arc::new( + RedisReceiver::new( + config.redis_messenger_config.clone(), + ConsumptionType::All, + ack_channel.clone(), + ) + .await?, + ); + run_transaction_processor( + shutdown_rx.resubscribe(), + mutexed_tasks.clone(), + redis_receiver, + geyser_bubblegum_updates_processor.clone(), + Some(metrics_state.message_process_metrics.clone()), + ) + .await; + } + } } let last_saved_slot = primary_rocks_storage.last_saved_slot()?.unwrap_or_default(); @@ -299,6 +297,7 @@ pub async fn main() -> Result<(), IngesterError> { index_pg_storage.clone(), primary_rocks_storage.clone(), metrics_state.json_downloader_metrics.clone(), + metrics_state.red_metrics.clone(), ) .await, ); @@ -372,12 +371,14 @@ pub async fn main() -> Result<(), IngesterError> { let cloned_index_storage = index_pg_storage.clone(); let file_storage_path = api_config.file_storage_path_container.clone(); + let red_metrics = metrics_state.red_metrics.clone(); mutexed_tasks.lock().await.spawn(async move { match start_api( cloned_index_storage, 
cloned_rocks_storage.clone(), cloned_rx, cloned_api_metrics, + Some(red_metrics), api_config.server_port, proof_checker, tree_gaps_checker, @@ -393,6 +394,7 @@ pub async fn main() -> Result<(), IngesterError> { api_config.file_storage_path_container.as_str(), account_balance_getter, api_config.storage_service_base_url, + api_config.native_mint_pubkey, ) .await { @@ -404,46 +406,6 @@ pub async fn main() -> Result<(), IngesterError> { } }); - let geyser_bubblegum_updates_processor = Arc::new(BubblegumTxProcessor::new( - primary_rocks_storage.clone(), - metrics_state.ingester_metrics.clone(), - buffer.json_tasks.clone(), - )); - - for _ in 0..config.transactions_parsing_workers { - match config.message_source { - MessageSource::Redis => { - let redis_receiver = Arc::new( - RedisReceiver::new( - config.redis_messenger_config.clone(), - ConsumptionType::All, - ack_channel.clone(), - ) - .await?, - ); - run_transaction_processor( - shutdown_rx.resubscribe(), - mutexed_tasks.clone(), - redis_receiver, - geyser_bubblegum_updates_processor.clone(), - Some(metrics_state.message_process_metrics.clone()), - ) - .await; - } - MessageSource::TCP => { - run_transaction_processor( - shutdown_rx.resubscribe(), - mutexed_tasks.clone(), - buffer.clone(), - geyser_bubblegum_updates_processor.clone(), - // TCP sender does not send any ids with timestamps so we may not pass message process metrics here - None, - ) - .await; - } - } - } - let cloned_rx = shutdown_rx.resubscribe(); let cloned_jp = json_processor.clone(); mutexed_tasks @@ -451,11 +413,8 @@ pub async fn main() -> Result<(), IngesterError> { .await .spawn(json_worker::run(cloned_jp, cloned_rx).map(|_| Ok(()))); - let backfill_bubblegum_updates_processor = Arc::new(BubblegumTxProcessor::new( - primary_rocks_storage.clone(), - metrics_state.ingester_metrics.clone(), - buffer.json_tasks.clone(), - )); + let backfill_bubblegum_updates_processor = + Arc::new(BubblegumTxProcessor::new(primary_rocks_storage.clone(), 
metrics_state.ingester_metrics.clone())); let tx_ingester = Arc::new(BackfillTransactionIngester::new(backfill_bubblegum_updates_processor.clone())); let backfiller_config = setup_config::(INGESTER_CONFIG_PREFIX); let backfiller_source = Arc::new( @@ -466,139 +425,49 @@ pub async fn main() -> Result<(), IngesterError> { ) .await, ); - let backfiller = - Arc::new(Backfiller::new(primary_rocks_storage.clone(), backfiller_source.clone(), backfiller_config.clone())); - let rpc_backfiller = Arc::new(BackfillRPC::connect(config.backfill_rpc_address.clone())); + let slot_db = Arc::new( + SlotStorage::open_secondary( + args.slots_db_path, + args.secondary_slots_db_path, + mutexed_tasks.clone(), + metrics_state.red_metrics.clone(), + ) + .expect("Failed to open slot storage"), + ); + let shutdown_token = CancellationToken::new(); if config.run_bubblegum_backfiller { if backfiller_config.should_reingest { - warn!("'Reingest' flag is set, deleting last fetched slot."); + warn!("'Reingest' flag is set, deleting last backfilled slot."); primary_rocks_storage - .delete_parameter::(rocks_db::parameters::Parameter::LastFetchedSlot) + .delete_parameter::(rocks_db::parameters::Parameter::LastBackfilledSlot) .await?; } match backfiller_config.backfiller_mode { BackfillerMode::IngestDirectly => { - let consumer = Arc::new(DirectBlockParser::new( - tx_ingester.clone(), - primary_rocks_storage.clone(), - metrics_state.backfiller_metrics.clone(), - )); - backfiller - .start_backfill( - mutexed_tasks.clone(), - shutdown_rx.resubscribe(), - metrics_state.backfiller_metrics.clone(), - consumer, - backfiller_source.clone(), - ) - .await?; - info!("Running Backfiller directly from bigtable to ingester."); + panic!("IngestDirectly mode is not supported any more."); } BackfillerMode::Persist => { - let consumer = primary_rocks_storage.clone(); - backfiller - .start_backfill( - mutexed_tasks.clone(), - shutdown_rx.resubscribe(), - metrics_state.backfiller_metrics.clone(), - consumer, - 
backfiller_source.clone(), - ) - .await?; - info!("Running Backfiller to persist raw data."); + panic!("Persist mode is not supported any more. Use slot_persister binary instead."); } BackfillerMode::IngestPersisted => { + panic!("IngestPersisted mode is not supported any more. Use backfill binary instead."); + } + BackfillerMode::PersistAndIngest => { let consumer = Arc::new(DirectBlockParser::new( tx_ingester.clone(), primary_rocks_storage.clone(), metrics_state.backfiller_metrics.clone(), )); - let producer = primary_rocks_storage.clone(); - - let transactions_parser = Arc::new(TransactionsParser::new( - primary_rocks_storage.clone(), - Arc::new(BubblegumSlotGetter::new(primary_rocks_storage.clone())), - consumer, - producer, - metrics_state.backfiller_metrics.clone(), - backfiller_config.workers_count, - backfiller_config.chunk_size, - )); - - let cloned_rx = shutdown_rx.resubscribe(); + let shutdown_token = shutdown_token.clone(); + let db = primary_rocks_storage.clone(); + let metrics: Arc = metrics_state.backfiller_metrics.clone(); + let slot_db = slot_db.clone(); mutexed_tasks.lock().await.spawn(async move { - info!("Running transactions parser..."); - - transactions_parser - .parse_raw_transactions( - cloned_rx, - backfiller_config.permitted_tasks, - backfiller_config.slot_until, - ) - .await; - + nft_ingester::backfiller::run_backfill_slots(shutdown_token, db, slot_db, consumer, metrics).await; Ok(()) }); - - info!("Running Backfiller on persisted raw data."); - } - BackfillerMode::PersistAndIngest => { - let rx = shutdown_rx.resubscribe(); - let metrics = Arc::new(BackfillerMetricsConfig::new()); - metrics.register_with_prefix(&mut metrics_state.registry, "slot_fetcher_"); - let backfiller_clone = backfiller.clone(); - let rpc_backfiller_clone = rpc_backfiller.clone(); - mutexed_tasks.lock().await.spawn(run_perpetual_slot_collection( - backfiller_clone, - rpc_backfiller_clone, - metrics, - backfiller_config.wait_period_sec, - rx, - )); - - // run 
perpetual slot persister - let rx = shutdown_rx.resubscribe(); - let consumer = primary_rocks_storage.clone(); - let producer = backfiller_source.clone(); - let metrics = Arc::new(BackfillerMetricsConfig::new()); - metrics.register_with_prefix(&mut metrics_state.registry, "slot_persister_"); - let slot_getter = Arc::new(BubblegumSlotGetter::new(primary_rocks_storage.clone())); - let backfiller_clone = backfiller.clone(); - mutexed_tasks.lock().await.spawn(run_perpetual_slot_processing( - backfiller_clone, - metrics, - slot_getter, - consumer, - producer, - backfiller_config.wait_period_sec, - rx, - None, - )); - // run perpetual ingester - let rx = shutdown_rx.resubscribe(); - let consumer = Arc::new(DirectBlockParser::new( - tx_ingester.clone(), - primary_rocks_storage.clone(), - metrics_state.backfiller_metrics.clone(), - )); - let producer = primary_rocks_storage.clone(); - let metrics = Arc::new(BackfillerMetricsConfig::new()); - metrics.register_with_prefix(&mut metrics_state.registry, "slot_ingester_"); - let slot_getter = Arc::new(IngestableSlotGetter::new(primary_rocks_storage.clone())); - let backfiller_clone = backfiller.clone(); - let backup = backfiller_source.clone(); - mutexed_tasks.lock().await.spawn(run_perpetual_slot_processing( - backfiller_clone, - metrics, - slot_getter, - consumer, - producer, - backfiller_config.wait_period_sec, - rx, - Some(backup), - )); } BackfillerMode::None => { info!("Not running Backfiller."); @@ -606,39 +475,9 @@ pub async fn main() -> Result<(), IngesterError> { }; } - if !config.disable_synchronizer { - for asset_type in ASSET_TYPES { - let rx = shutdown_rx.resubscribe(); - let synchronizer = synchronizer.clone(); - mutexed_tasks.lock().await.spawn(async move { - match asset_type { - AssetType::NonFungible => { - synchronizer - .nft_run(&rx, config.dump_sync_threshold, Duration::from_secs(5)) - .await - } - AssetType::Fungible => { - synchronizer - .fungible_run(&rx, config.dump_sync_threshold, 
Duration::from_secs(5)) - .await - } - } - - Ok(()) - }); - } - } - while let Some(res) = mutexed_tasks.lock().await.join_next().await { - match res { - Ok(_) => {} - Err(err) if err.is_panic() => panic::resume_unwind(err.into_panic()), - Err(err) => panic!("{err}"), - } - } - // setup dependencies for grpc server let uc = AssetStreamer::new(config.peer_grpc_max_gap_slots, primary_rocks_storage.clone()); - let bs = BlocksStreamer::new(config.peer_grpc_max_gap_slots, primary_rocks_storage.clone()); + let bs = BlocksStreamer::new(config.peer_grpc_max_gap_slots, slot_db.clone()); let serv = PeerGapFillerServiceImpl::new(Arc::new(uc), Arc::new(bs), primary_rocks_storage.clone()); let asset_url_serv = AssetUrlServiceImpl::new(primary_rocks_storage.clone()); let addr = format!("0.0.0.0:{}", config.peer_grpc_port).parse()?; @@ -659,6 +498,8 @@ pub async fn main() -> Result<(), IngesterError> { Scheduler::run_in_background(Scheduler::new(primary_rocks_storage.clone())).await; + let rpc_backfiller = Arc::new(BackfillRPC::connect(config.backfill_rpc_address.clone())); + let rocks_clone = primary_rocks_storage.clone(); let signature_fetcher = SignatureFetcher::new( rocks_clone, @@ -756,7 +597,7 @@ pub async fn main() -> Result<(), IngesterError> { if config.run_fork_cleaner { let fork_cleaner = ForkCleaner::new( primary_rocks_storage.clone(), - primary_rocks_storage.clone(), + slot_db.clone(), metrics_state.fork_cleaner_metrics.clone(), ); let rx = shutdown_rx.resubscribe(); @@ -829,7 +670,15 @@ pub async fn main() -> Result<(), IngesterError> { start_metrics(metrics_state.registry, config.metrics_port).await; // --stop - graceful_stop(mutexed_tasks, shutdown_tx, guard, config.profiling_file_path_container, &config.heap_path).await; + graceful_stop( + mutexed_tasks, + shutdown_tx, + Some(shutdown_token), + guard, + config.profiling_file_path_container, + &config.heap_path, + ) + .await; Ok(()) } diff --git a/nft_ingester/src/bin/migrator/main.rs 
b/nft_ingester/src/bin/migrator/main.rs index cb6474c1d..85d3f0c64 100644 --- a/nft_ingester/src/bin/migrator/main.rs +++ b/nft_ingester/src/bin/migrator/main.rs @@ -6,6 +6,8 @@ use metrics_utils::red::RequestErrorDurationMetrics; use metrics_utils::utils::start_metrics; use metrics_utils::{JsonMigratorMetricsConfig, MetricState, MetricStatus, MetricsTrait}; use postgre_client::PgClient; +use rocks_db::asset::AssetCompleteDetails; +use rocks_db::column::TypedColumn; use tokio::sync::broadcast::Receiver; use tokio::sync::{broadcast, Mutex}; use tokio::task::{JoinError, JoinSet}; @@ -16,8 +18,9 @@ use nft_ingester::config::{ }; use nft_ingester::error::IngesterError; use nft_ingester::init::graceful_stop; +use rocks_db::asset_generated::asset as fb; use rocks_db::migrator::MigrationState; -use rocks_db::{AssetDynamicDetails, Storage}; +use rocks_db::Storage; pub const DEFAULT_MIN_POSTGRES_CONNECTIONS: u32 = 100; pub const DEFAULT_MAX_POSTGRES_CONNECTIONS: u32 = 100; @@ -55,7 +58,7 @@ pub async fn main() -> Result<(), IngesterError> { let red_metrics = Arc::new(RequestErrorDurationMetrics::new()); let storage = Storage::open( - &config.json_target_db.clone(), + config.json_target_db.clone(), mutexed_tasks.clone(), red_metrics.clone(), MigrationState::Last, @@ -206,7 +209,13 @@ impl JsonMigrator { rx: Receiver<()>, tasks: Arc>>>, ) { - let dynamic_asset_details = self.target_rocks_db.asset_dynamic_data.iter_end(); + let mut assets_iter = self.target_rocks_db.db.raw_iterator_cf( + &self + .target_rocks_db + .db + .cf_handle(AssetCompleteDetails::NAME) + .unwrap(), + ); let tasks_buffer = Arc::new(Mutex::new(Vec::new())); @@ -270,22 +279,23 @@ impl JsonMigrator { Ok(()) }); - for dynamic_details in dynamic_asset_details { + assets_iter.seek_to_first(); + while assets_iter.valid() { if !rx.is_empty() { info!("Setting tasks for JSONs is stopped"); break; } - - match dynamic_details { - Ok((_key, value)) => { - let dynamic_details = bincode::deserialize::(&value); - - match 
dynamic_details { - Ok(data) => { - let downloaded_json = self - .target_rocks_db - .asset_offchain_data - .get(data.url.value.clone()); + if let Some(value) = assets_iter.value() { + match fb::root_as_asset_complete_details(value) { + Ok(asset) => { + if let Some(url) = asset + .dynamic_details() + .and_then(|d| d.url()) + .and_then(|u| u.value()) + { + let url = url.trim().replace('\0', "").clone(); + let downloaded_json = + self.target_rocks_db.asset_offchain_data.get(url.clone()); if let Err(e) = downloaded_json { error!("asset_offchain_data.get: {}", e); @@ -293,7 +303,7 @@ impl JsonMigrator { } let mut task = Task { - ofd_metadata_url: data.url.value.trim().replace('\0', "").clone(), + ofd_metadata_url: url, ofd_locked_until: None, ofd_attempts: 0, ofd_max_attempts: 10, @@ -317,15 +327,13 @@ impl JsonMigrator { self.metrics .set_tasks_buffer("tasks_buffer", buff.len() as i64); } - Err(e) => { - error!("bincode::deserialize: {}", e) - } } - } - Err(e) => { - error!("asset_dynamic_data.iter_end: {}", e) + Err(e) => { + error!("root_as_asset_complete_details: {}", e); + } } } + assets_iter.next(); } } } diff --git a/nft_ingester/src/bin/raw_backfiller/main.rs b/nft_ingester/src/bin/raw_backfiller/main.rs deleted file mode 100644 index 1c6624410..000000000 --- a/nft_ingester/src/bin/raw_backfiller/main.rs +++ /dev/null @@ -1,232 +0,0 @@ -use std::sync::Arc; - -use nft_ingester::backfiller::{BackfillSource, Backfiller, DirectBlockParser, TransactionsParser}; -use nft_ingester::buffer::Buffer; -use nft_ingester::config::{ - self, init_logger, setup_config, BackfillerConfig, RawBackfillConfig, INGESTER_CONFIG_PREFIX, -}; -use nft_ingester::error::IngesterError; -use nft_ingester::init::graceful_stop; -use nft_ingester::processors::transaction_based::bubblegum_updates_processor::BubblegumTxProcessor; -use nft_ingester::transaction_ingester; -use prometheus_client::registry::Registry; -use tempfile::TempDir; -use tracing::{error, info}; - -use 
metrics_utils::red::RequestErrorDurationMetrics; -use metrics_utils::utils::setup_metrics; -use metrics_utils::{BackfillerMetricsConfig, IngesterMetricsConfig}; -use rocks_db::bubblegum_slots::BubblegumSlotGetter; -use rocks_db::migrator::MigrationState; -use rocks_db::Storage; -use tokio::sync::{broadcast, Mutex}; -use tokio::task::JoinSet; - -#[cfg(feature = "profiling")] -#[global_allocator] -static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc; - -pub const DEFAULT_ROCKSDB_PATH: &str = "./my_rocksdb"; - -#[tokio::main(flavor = "multi_thread")] -pub async fn main() -> Result<(), IngesterError> { - info!("Starting raw backfill server..."); - - let config: RawBackfillConfig = setup_config(INGESTER_CONFIG_PREFIX); - init_logger(&config.log_level); - - let guard = if config.run_profiling { - Some( - pprof::ProfilerGuardBuilder::default() - .frequency(100) - .build() - .unwrap(), - ) - } else { - None - }; - - let mut registry = Registry::default(); - let metrics = Arc::new(BackfillerMetricsConfig::new()); - metrics.register(&mut registry); - let ingester_metrics = Arc::new(IngesterMetricsConfig::new()); - ingester_metrics.register(&mut registry); - - tokio::spawn(async move { - match setup_metrics(registry, config.metrics_port).await { - Ok(_) => { - info!("Setup metrics successfully") - } - Err(e) => { - error!("Setup metrics failed: {:?}", e) - } - } - }); - - let tasks = JoinSet::new(); - let mutexed_tasks = Arc::new(Mutex::new(tasks)); - - let primary_storage_path = config - .rocks_db_path_container - .clone() - .unwrap_or(DEFAULT_ROCKSDB_PATH.to_string()); - - let red_metrics = Arc::new(RequestErrorDurationMetrics::new()); - { - // storage in secondary mod cannot create new column families, that - // could be required for migration_version_manager, so firstly open - // storage with MigrationState::CreateColumnFamilies in order to create - // all column families - Storage::open( - &config - .rocks_db_path_container - .clone() - 
.unwrap_or(DEFAULT_ROCKSDB_PATH.to_string()), - mutexed_tasks.clone(), - red_metrics.clone(), - MigrationState::CreateColumnFamilies, - ) - .unwrap(); - } - let migration_version_manager_dir = TempDir::new().unwrap(); - let migration_version_manager = Storage::open_secondary( - &config - .rocks_db_path_container - .clone() - .unwrap_or(DEFAULT_ROCKSDB_PATH.to_string()), - migration_version_manager_dir.path().to_str().unwrap(), - mutexed_tasks.clone(), - red_metrics.clone(), - MigrationState::Last, - ) - .unwrap(); - Storage::apply_all_migrations( - &config - .rocks_db_path_container - .clone() - .unwrap_or(DEFAULT_ROCKSDB_PATH.to_string()), - &config.migration_storage_path, - Arc::new(migration_version_manager), - ) - .await - .unwrap(); - let storage = Storage::open( - &primary_storage_path, - mutexed_tasks.clone(), - red_metrics.clone(), - MigrationState::Last, - ) - .unwrap(); - - let rocks_storage = Arc::new(storage); - - let consumer = rocks_storage.clone(); - let backfiller_config: BackfillerConfig = setup_config(INGESTER_CONFIG_PREFIX); - - let backfiller_source = Arc::new( - BackfillSource::new( - &backfiller_config.backfiller_source_mode, - backfiller_config.rpc_host.clone(), - &backfiller_config.big_table_config, - ) - .await, - ); - - let backfiller = Backfiller::new( - rocks_storage.clone(), - backfiller_source.clone(), - backfiller_config.clone(), - ); - let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); - - match backfiller_config.backfiller_mode { - config::BackfillerMode::IngestDirectly => { - todo!(); - } - config::BackfillerMode::Persist | config::BackfillerMode::PersistAndIngest => { - backfiller - .start_backfill( - mutexed_tasks.clone(), - shutdown_rx.resubscribe(), - metrics.clone(), - consumer, - backfiller_source.clone(), - ) - .await - .unwrap(); - info!("running backfiller to persist raw data"); - } - config::BackfillerMode::IngestPersisted => { - let buffer = Arc::new(Buffer::new()); - // run dev->null buffer consumer - let 
cloned_rx = shutdown_rx.resubscribe(); - let clonned_json_deque = buffer.json_tasks.clone(); - mutexed_tasks.lock().await.spawn(async move { - info!("Running empty buffer consumer..."); - while cloned_rx.is_empty() { - clonned_json_deque.lock().await.clear(); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - } - - Ok(()) - }); - let bubblegum_updates_processor = Arc::new(BubblegumTxProcessor::new( - rocks_storage.clone(), - ingester_metrics.clone(), - buffer.json_tasks.clone(), - )); - - let tx_ingester = Arc::new(transaction_ingester::BackfillTransactionIngester::new( - bubblegum_updates_processor.clone(), - )); - - let consumer = Arc::new(DirectBlockParser::new( - tx_ingester.clone(), - rocks_storage.clone(), - metrics.clone(), - )); - let producer = rocks_storage.clone(); - - let transactions_parser = Arc::new(TransactionsParser::new( - rocks_storage.clone(), - Arc::new(BubblegumSlotGetter::new(rocks_storage.clone())), - consumer, - producer, - metrics.clone(), - backfiller_config.workers_count, - backfiller_config.chunk_size, - )); - - mutexed_tasks.lock().await.spawn(async move { - info!("Running transactions parser..."); - - transactions_parser - .parse_raw_transactions( - shutdown_rx.resubscribe(), - backfiller_config.permitted_tasks, - backfiller_config.slot_until, - ) - .await; - - Ok(()) - }); - - info!("running backfiller on persisted raw data"); - } - config::BackfillerMode::None => { - info!("not running backfiller"); - } - }; - - // --stop - graceful_stop( - mutexed_tasks, - shutdown_tx, - guard, - config.profiling_file_path_container, - &config.heap_path, - ) - .await; - - Ok(()) -} diff --git a/nft_ingester/src/bin/slot_checker/main.rs b/nft_ingester/src/bin/slot_checker/main.rs new file mode 100644 index 000000000..6534428e6 --- /dev/null +++ b/nft_ingester/src/bin/slot_checker/main.rs @@ -0,0 +1,379 @@ +use std::collections::{BTreeSet, HashSet}; +use std::path::PathBuf; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use 
clap::Parser; +use indicatif::{ProgressBar, ProgressStyle}; +use metrics_utils::MetricState; +use rocks_db::column::TypedColumn; +use rocks_db::migrator::MigrationVersions; +use rocks_db::Storage; +use tokio::signal; +use tokio::sync::{broadcast, Mutex as AsyncMutex}; +use tracing::{error, info, warn}; + +use entities::models::{OffChainData, RawBlock}; +use interface::slots_dumper::SlotsDumper; +use usecase::bigtable::BigTableClient; +use usecase::slots_collector::SlotsCollector; + +use blockbuster::programs::bubblegum::ID as BUBBLEGUM_PROGRAM_ID; + +use tokio_util::sync::CancellationToken; + +// For InMemorySlotsDumper +use async_trait::async_trait; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + /// Path to the target RocksDB instance with slots + #[arg(short, long)] + target_db_path: PathBuf, + + /// Optional big table credentials file path + /// If not provided, the default credentials file path will be used + /// Default: ./creds.json + #[arg(short, long, default_value = "./creds.json")] + big_table_credentials: String, + + /// Optional big table timeout (default: 1000) + #[arg(short, long, default_value_t = 1000)] + big_table_timeout: u32, + + /// Optional comma-separated list of slot numbers to check + #[arg(short = 's', long)] + slots: Option, +} + +pub struct InMemorySlotsDumper { + slots: AsyncMutex>, +} + +impl InMemorySlotsDumper { + /// Creates a new instance of `InMemorySlotsDumper`. + pub fn new() -> Self { + Self { + slots: AsyncMutex::new(BTreeSet::new()), + } + } + + /// Retrieves the sorted keys in ascending order. + pub async fn get_sorted_keys(&self) -> Vec { + let slots = self.slots.lock().await; + slots.iter().cloned().collect() + } + + /// Clears the internal storage to reuse it. 
+ pub async fn clear(&self) { + let mut slots = self.slots.lock().await; + slots.clear(); + } +} + +#[async_trait] +impl SlotsDumper for InMemorySlotsDumper { + async fn dump_slots(&self, slots: &[u64]) { + let mut storage = self.slots.lock().await; + for &slot in slots { + storage.insert(slot); + } + } +} + +// Function to get the last persisted slot from RocksDB +fn get_last_persisted_slots(rocks_db: Arc) -> u64 { + let mut it = rocks_db + .db + .raw_iterator_cf(&rocks_db.db.cf_handle(RawBlock::NAME).unwrap()); + it.seek_to_last(); + if !it.valid() { + return 0; + } + it.key() + .and_then(|b| RawBlock::decode_key(b.to_vec()).ok()) + .unwrap_or_default() +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Initialize tracing subscriber for logging + tracing_subscriber::fmt::init(); + info!("Starting Slot Checker..."); + + let args = Args::parse(); + let metrics_state = MetricState::new(); + + // Open target RocksDB in read-only mode + let db = Arc::new( + Storage::open_readonly_with_cfs( + &args.target_db_path, + vec![RawBlock::NAME, MigrationVersions::NAME, OffChainData::NAME], + Arc::new(tokio::sync::Mutex::new(tokio::task::JoinSet::new())), + metrics_state.red_metrics, + ) + .expect("Failed to open target RocksDB"), + ); + + // Get the last persisted slot from RocksDB + let last_persisted_slot = get_last_persisted_slots(db.clone()); + + info!("Last persisted slot: {}", last_persisted_slot); + + // Initialize BigTableClient + let bt_connection = Arc::new( + BigTableClient::connect_new_with(args.big_table_credentials, args.big_table_timeout) + .await + .expect("Failed to connect to BigTable"), + ); + + // Initialize the in-memory slots dumper + let in_mem_dumper = Arc::new(InMemorySlotsDumper::new()); + + // Initialize the slots collector + let slots_collector = SlotsCollector::new( + in_mem_dumper.clone(), + bt_connection.big_table_inner_client.clone(), + metrics_state.backfiller_metrics, + ); + + // Handle Ctrl+C + let shutdown_token = 
CancellationToken::new(); + let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); + + // Spawn a task to handle graceful shutdown on Ctrl+C + { + let shutdown_token = shutdown_token.clone(); + tokio::spawn(async move { + if signal::ctrl_c().await.is_ok() { + info!("Received Ctrl+C, shutting down gracefully..."); + shutdown_token.cancel(); + shutdown_tx.send(()).unwrap(); + } else { + error!("Unable to listen for shutdown signal"); + } + }); + } + // Check if slots or slots_file is provided + let mut slots_to_check = Vec::new(); + + if let Some(slots_str) = args.slots { + // Parse comma-separated list of slots + info!("Checking specific slots provided via command line."); + for part in slots_str.split(',') { + let slot_str = part.trim(); + if let Ok(slot) = slot_str.parse::() { + slots_to_check.push(slot); + } else { + warn!("Invalid slot number provided: {}", slot_str); + } + } + } + if !slots_to_check.is_empty() { + // Remove duplicates + let slots_to_check: Vec = { + let mut set = HashSet::new(); + slots_to_check + .into_iter() + .filter(|x| set.insert(*x)) + .collect() + }; + + let total_slots_to_check = slots_to_check.len(); + + info!("Total slots to check: {}", total_slots_to_check); + + // Initialize progress bar for verification + let progress_bar = ProgressBar::new(total_slots_to_check as u64); + progress_bar.set_style( + ProgressStyle::default_bar() + .template("{msg} {bar:40.cyan/blue} {pos}/{len} [{eta_precise}]") + .unwrap() + .progress_chars("##-"), + ); + progress_bar.set_message("Verifying slots"); + + let cf_handle = db.db.cf_handle(RawBlock::NAME).unwrap(); + + let mut present_slots = Vec::new(); + let mut missing_slots = Vec::new(); + + // Sort slots to check for consistent batching + let mut slots_to_check = slots_to_check; + slots_to_check.sort_unstable(); + + // Prepare keys + let keys: Vec<_> = slots_to_check + .iter() + .map(|&slot| RawBlock::encode_key(slot)) + .collect(); + + // Batch get + let results = 
db.db.batched_multi_get_cf(&cf_handle, keys, true); + + for (i, result) in results.into_iter().enumerate() { + let slot = slots_to_check[i]; + match result { + Ok(Some(_)) => { + present_slots.push(slot); + } + Ok(None) => { + missing_slots.push(slot); + } + Err(e) => { + error!("Error fetching slot {}: {}", slot, e); + missing_slots.push(slot); // Consider as missing on error + } + } + progress_bar.inc(1); + } + + progress_bar.finish_with_message("Verification complete."); + + // Output results + info!("Slots present in RocksDB: {:?}", present_slots); + + info!("Slots missing from RocksDB: {:?}", missing_slots); + + return Ok(()); // Exit after processing + } + + // Store missing slots + let missing_slots = Arc::new(Mutex::new(Vec::new())); + + info!( + "Starting to collect slots from {} to {}", + 0, last_persisted_slot + ); + + // Initialize progress bar spinner for collection + let progress_bar = ProgressBar::new_spinner(); + progress_bar.set_message("Collecting slots..."); + progress_bar.enable_steady_tick(Duration::from_millis(100)); // Update every 100ms + + // Collect slots from last persisted slot down to 0 + in_mem_dumper.clear().await; + + // Start slot collection + let _ = slots_collector + .collect_slots(&BUBBLEGUM_PROGRAM_ID, last_persisted_slot, 0, &shutdown_rx) + .await; + + // Collection done, stop the spinner + progress_bar.finish_with_message("Slot collection complete."); + + // Get the collected slots + let collected_slots = in_mem_dumper.get_sorted_keys().await; + in_mem_dumper.clear().await; + + if shutdown_token.is_cancelled() { + info!("Shutdown signal received, stopping..."); + return Ok(()); + } + + let total_slots_to_check = collected_slots.len() as u64; + + info!( + "Collected {} slots in range {} to {}", + total_slots_to_check, 0, last_persisted_slot + ); + + // Initialize progress bar for verification + let progress_bar = ProgressBar::new(total_slots_to_check); + progress_bar.set_style( + ProgressStyle::default_bar() + 
.template("{spinner:.green} [{elapsed_precise}] {msg} {bar:40.cyan/blue} {percent}% ({pos}/{len}) [{eta_precise}]") + .unwrap() + .progress_chars("##-"), + ); + progress_bar.set_message("Verifying slots"); + + // Prepare iterators for collected slots and RocksDB keys + let mut slots_iter = collected_slots.into_iter(); + let mut next_slot = slots_iter.next(); + + let cf_handle = db.db.cf_handle(RawBlock::NAME).unwrap(); + let mut db_iter = db.db.raw_iterator_cf(&cf_handle); + db_iter.seek_to_first(); + + let mut current_db_slot = if db_iter.valid() { + if let Some(key_bytes) = db_iter.key() { + RawBlock::decode_key(key_bytes.to_vec()).ok() + } else { + None + } + } else { + None + }; + + // Verification loop + while let Some(slot) = next_slot { + if shutdown_token.is_cancelled() { + info!("Shutdown signal received, stopping..."); + break; + } + + if let Some(db_slot) = current_db_slot { + if slot == db_slot { + // Slot exists in RocksDB + // Advance both iterators + next_slot = slots_iter.next(); + db_iter.next(); + current_db_slot = if db_iter.valid() { + if let Some(key_bytes) = db_iter.key() { + RawBlock::decode_key(key_bytes.to_vec()).ok() + } else { + None + } + } else { + None + }; + } else if slot < db_slot { + // Slot is missing in RocksDB + { + let mut missing_slots_lock = missing_slots.lock().unwrap(); + missing_slots_lock.push(slot); + } + // Advance slots iterator + next_slot = slots_iter.next(); + } else { + // slot > db_slot + // Advance RocksDB iterator + db_iter.next(); + current_db_slot = if db_iter.valid() { + if let Some(key_bytes) = db_iter.key() { + RawBlock::decode_key(key_bytes.to_vec()).ok() + } else { + None + } + } else { + None + }; + } + } else { + // No more slots in RocksDB, remaining slots are missing + { + let mut missing_slots_lock = missing_slots.lock().unwrap(); + missing_slots_lock.push(slot); + missing_slots_lock.extend(slots_iter); + } + break; + } + + // Update progress bar + progress_bar.inc(1); + } + + 
progress_bar.finish_with_message("Verification complete."); + + // Print missing slots + let missing_slots = missing_slots.lock().unwrap(); + if !missing_slots.is_empty() { + info!("Missing slots: {:?}", missing_slots); + } else { + println!("All collected slots are present in the RocksDB."); + } + + Ok(()) +} diff --git a/nft_ingester/src/bin/slot_persister/main.rs b/nft_ingester/src/bin/slot_persister/main.rs new file mode 100644 index 000000000..a0913b913 --- /dev/null +++ b/nft_ingester/src/bin/slot_persister/main.rs @@ -0,0 +1,494 @@ +use async_trait::async_trait; +use backfill_rpc::rpc::BackfillRPC; +use clap::Parser; +use entities::models::RawBlock; +use futures::future::join_all; +use interface::signature_persistence::BlockProducer; +use interface::slot_getter::FinalizedSlotGetter; +use interface::slots_dumper::SlotsDumper; +use metrics_utils::utils::start_metrics; +use metrics_utils::{MetricState, MetricsTrait}; +use nft_ingester::backfiller::BackfillSource; +use rocks_db::column::TypedColumn; +use rocks_db::SlotStorage; +use std::collections::{BTreeSet, HashMap, HashSet}; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::Semaphore; +use tokio::sync::{broadcast, Mutex}; +use tokio_retry::strategy::ExponentialBackoff; +use tokio_retry::RetryIf; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error, info, warn}; +use usecase::bigtable::BigTableClient; +use usecase::slots_collector::SlotsCollector; + +const MAX_RETRIES: usize = 5; +const INITIAL_DELAY_MS: u64 = 100; + +const MAX_BATCH_RETRIES: usize = 5; +const INITIAL_BATCH_DELAY_MS: u64 = 500; +// Offset to start collecting slots from, approximately 2 minutes before the finalized slot, given the eventual consistency of the big table +const SLOT_COLLECTION_OFFSET: u64 = 300; + +#[derive(Parser, Debug)] +#[command( + author, + version, + about, + long_about = "Slot persister reads the slot data from the BigTable and persists it to the RocksDB." 
+)] +struct Args { + /// Path to the target RocksDB instance with slots + #[arg(short, long)] + target_db_path: PathBuf, + + /// RPC host + #[arg(short, long)] + rpc_host: String, + + /// Optional starting slot number, this will override the last saved slot in the RocksDB + #[arg(short, long)] + start_slot: Option, + + /// Big table credentials file path + #[arg(short, long)] + big_table_credentials: Option, + + /// Optional big table timeout (default: 1000) + #[arg(short = 'B', long, default_value_t = 1000)] + big_table_timeout: u32, + + /// Metrics port + /// Default: 9090 + #[arg(short, long, default_value = "9090")] + metrics_port: u16, + + /// Number of slots to process in each batch + #[arg(short, long, default_value_t = 200)] + chunk_size: usize, + + /// Maximum number of concurrent requests + #[arg(short = 'M', long, default_value_t = 20)] + max_concurrency: usize, + + /// Optional comma-separated list of slot numbers to check + #[arg(long)] + slots: Option, +} +pub struct InMemorySlotsDumper { + slots: Mutex>, +} +impl Default for InMemorySlotsDumper { + fn default() -> Self { + Self::new() + } +} +impl InMemorySlotsDumper { + /// Creates a new instance of `InMemorySlotsDumper`. + pub fn new() -> Self { + Self { + slots: Mutex::new(BTreeSet::new()), + } + } + + /// Retrieves the sorted keys in ascending order. + pub async fn get_sorted_keys(&self) -> Vec { + let slots = self.slots.lock().await; + slots.iter().cloned().collect() + } + + /// Clears the internal storage to reuse it. 
+ pub async fn clear(&self) { + let mut slots = self.slots.lock().await; + slots.clear(); + } +} + +#[async_trait] +impl SlotsDumper for InMemorySlotsDumper { + async fn dump_slots(&self, slots: &[u64]) { + let mut storage = self.slots.lock().await; + for &slot in slots { + storage.insert(slot); + } + } +} + +pub fn get_last_persisted_slot(rocks_db: Arc) -> u64 { + let mut it = rocks_db + .db + .raw_iterator_cf(&rocks_db.db.cf_handle(RawBlock::NAME).unwrap()); + it.seek_to_last(); + if !it.valid() { + return 0; + } + it.key() + .map(|b| RawBlock::decode_key(b.to_vec()).unwrap_or_default()) + .unwrap_or_default() +} + +#[derive(Debug)] +enum FetchError { + Cancelled, + Other(String), +} + +async fn fetch_block_with_retries( + block_getter: Arc, + slot: u64, + shutdown_token: CancellationToken, +) -> Result<(u64, RawBlock), (u64, FetchError)> { + let retry_strategy = ExponentialBackoff::from_millis(INITIAL_DELAY_MS) + .factor(2) + .max_delay(Duration::from_secs(10)) + .take(MAX_RETRIES); + + RetryIf::spawn( + retry_strategy, + || { + let block_getter = block_getter.clone(); + let shutdown_token = shutdown_token.clone(); + async move { + if shutdown_token.is_cancelled() { + info!("Fetch cancelled for slot {} due to shutdown signal.", slot); + Err((slot, FetchError::Cancelled)) + } else { + debug!("Fetching slot {}", slot); + match block_getter + .get_block(slot, None::>) + .await + { + Ok(block_data) => { + debug!("Successfully fetched block for slot {}", slot); + Ok(( + slot, + RawBlock { + slot, + block: block_data, + }, + )) + } + Err(e) => { + error!("Error fetching block for slot {}: {}", slot, e); + Err((slot, FetchError::Other(e.to_string()))) + } + } + } + } + }, + |e: &(u64, FetchError)| { + let (_, ref err) = *e; + match err { + FetchError::Cancelled => false, // Do not retry if cancelled + _ => true, // Retry on other errors + } + }, + ) + .await +} + +#[tokio::main(flavor = "multi_thread")] +pub async fn main() -> Result<(), Box> { + // Initialize tracing 
subscriber for logging + tracing_subscriber::fmt::init(); + info!("Starting Slot persister..."); + + let args = Args::parse(); + + let mut metrics_state = MetricState::new(); + metrics_state.register_metrics(); + + start_metrics(metrics_state.registry, Some(args.metrics_port)).await; + // Open target RocksDB + let target_db = Arc::new( + SlotStorage::open( + &args.target_db_path, + Arc::new(tokio::sync::Mutex::new(tokio::task::JoinSet::new())), + metrics_state.red_metrics.clone(), + ) + .expect("Failed to open target RocksDB"), + ); + + let last_persisted_slot = get_last_persisted_slot(target_db.clone()); + let start_slot = if let Some(start_slot) = args.start_slot { + info!( + "Starting from slot: {}, while last persisted slot: {}", + start_slot, last_persisted_slot + ); + start_slot + } else { + info!("Starting from last persisted slot: {}", last_persisted_slot); + last_persisted_slot + }; + + let shutdown_token = CancellationToken::new(); + let shutdown_token_clone = shutdown_token.clone(); + let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); + + // Spawn a task to handle graceful shutdown on Ctrl+C + tokio::spawn(async move { + // Wait for Ctrl+C signal + match tokio::signal::ctrl_c().await { + Ok(()) => { + info!("Received Ctrl+C, shutting down gracefully..."); + shutdown_token_clone.cancel(); + shutdown_tx.send(()).unwrap(); + } + Err(err) => { + error!("Unable to listen for shutdown signal: {}", err); + } + } + }); + + let rpc_client = Arc::new(BackfillRPC::connect(args.rpc_host.clone())); + + let backfill_source = { + if let Some(ref bg_creds) = args.big_table_credentials { + Arc::new(BackfillSource::Bigtable(Arc::new( + BigTableClient::connect_new_with(bg_creds.clone(), args.big_table_timeout) + .await + .expect("expected to connect to big table"), + ))) + } else { + Arc::new(BackfillSource::Rpc(rpc_client.clone())) + } + }; + + let in_mem_dumper = Arc::new(InMemorySlotsDumper::new()); + let slots_collector = SlotsCollector::new( + 
in_mem_dumper.clone(), + backfill_source.clone(), + metrics_state.backfiller_metrics.clone(), + ); + let wait_period = Duration::from_secs(1); + // Check if slots are provided via --slots argument + let mut provided_slots = Vec::new(); + if let Some(ref slots_str) = args.slots { + // Parse comma-separated list of slots + info!("Processing specific slots provided via command line."); + for part in slots_str.split(',') { + let slot_str = part.trim(); + if let Ok(slot) = slot_str.parse::() { + provided_slots.push(slot); + } else { + warn!("Invalid slot number provided: {}", slot_str); + } + } + + // Remove duplicates and sort slots + let mut slots_set = HashSet::new(); + provided_slots = provided_slots + .into_iter() + .filter(|x| slots_set.insert(*x)) + .collect(); + provided_slots.sort_unstable(); + + if provided_slots.is_empty() { + error!("No valid slots to process. Exiting."); + return Ok(()); + } + + info!("Total slots to process: {}", provided_slots.len()); + + // Proceed to process the provided slots + process_slots( + provided_slots, + backfill_source, + target_db, + &args, + shutdown_token.clone(), + ) + .await; + return Ok(()); // Exit after processing provided slots + } + let mut start_slot = start_slot; + loop { + if shutdown_token.is_cancelled() { + info!("Shutdown signal received, exiting main loop..."); + break; + } + + match rpc_client.get_finalized_slot().await { + Ok(finalized_slot) => { + let last_slot_to_check = finalized_slot.saturating_sub(SLOT_COLLECTION_OFFSET); + info!( + "Finalized slot from RPC: {}, offsetting slot collection to: {}", + finalized_slot, last_slot_to_check + ); + let top_collected_slot = slots_collector + .collect_slots( + &blockbuster::programs::bubblegum::ID, + last_slot_to_check, + start_slot, + &shutdown_rx, + ) + .await; + if let Some(slot) = top_collected_slot { + start_slot = slot; + } + let slots = in_mem_dumper.get_sorted_keys().await; + in_mem_dumper.clear().await; + info!( + "Collected {} slots to persist between 
{} and {}", + slots.len(), + start_slot, + last_slot_to_check + ); + // slots has all the slots numbers we need to downlaod and persist. Slots should be downloaded concurrently, but no slot shouold be persisted if the previous slot is not persisted. + if slots.is_empty() { + info!("No new slots to process. Sleeping for {:?}", wait_period); + let sleep = tokio::time::sleep(wait_period); + + tokio::select! { + _ = sleep => {}, + _ = shutdown_token.cancelled() => { + info!("Received shutdown signal, stopping loop..."); + break; + }, + }; + continue; + } + // Process the collected slots + process_slots( + slots, + backfill_source.clone(), + target_db.clone(), + &args, + shutdown_token.clone(), + ) + .await; + } + Err(e) => { + error!("Error getting finalized slot: {}", e); + } + } + + let sleep = tokio::time::sleep(wait_period); + tokio::select! { + _ = sleep => {}, + _ = shutdown_token.cancelled() => { + info!("Received shutdown signal, stopping loop..."); + break; + }, + }; + } + info!("Slot persister has stopped."); + Ok(()) +} + +async fn process_slots( + slots: Vec, + backfill_source: Arc, + target_db: Arc, + args: &Args, + shutdown_token: CancellationToken, +) { + // Process slots in batches + for batch in slots.chunks(args.chunk_size) { + if shutdown_token.is_cancelled() { + info!("Shutdown signal received during batch processing, exiting..."); + break; + } + + let mut batch_retries = 0; + let mut batch_delay_ms = INITIAL_BATCH_DELAY_MS; + + // Initialize the list of slots to fetch and the map of successful blocks + let mut slots_to_fetch: Vec = batch.to_vec(); + let mut successful_blocks: HashMap = HashMap::new(); + + // Retry loop for the batch + loop { + if shutdown_token.is_cancelled() { + info!("Shutdown signal received during batch processing, exiting..."); + break; + } + + let semaphore = Arc::new(Semaphore::new(args.max_concurrency)); + + let fetch_futures = slots_to_fetch.iter().map(|&slot| { + let backfill_source = backfill_source.clone(); + let 
semaphore = semaphore.clone(); + let shutdown_token = shutdown_token.clone(); + + async move { + let _permit = semaphore.acquire().await; + fetch_block_with_retries(backfill_source, slot, shutdown_token).await + } + }); + + let results = join_all(fetch_futures).await; + + let mut new_failed_slots = Vec::new(); + + for result in results { + match result { + Ok((slot, raw_block)) => { + successful_blocks.insert(slot, raw_block); + } + Err((slot, e)) => { + new_failed_slots.push(slot); + error!("Failed to fetch slot {}: {:?}", slot, e); + } + } + } + + if new_failed_slots.is_empty() { + // All slots fetched successfully, save to database + debug!( + "All slots fetched successfully for current batch. Saving {} slots to RocksDB.", + successful_blocks.len() + ); + if let Err(e) = target_db + .raw_blocks_cbor + .put_batch_cbor(successful_blocks.clone()) + .await + { + error!("Failed to save blocks to RocksDB: {}", e); + // Handle error or retry saving as needed + batch_retries += 1; + if batch_retries >= MAX_BATCH_RETRIES { + panic!( + "Failed to save batch to RocksDB after {} retries. Discarding batch.", + MAX_BATCH_RETRIES + ); + } else { + warn!( + "Retrying batch save {}/{} after {} ms due to error.", + batch_retries, MAX_BATCH_RETRIES, batch_delay_ms + ); + tokio::time::sleep(Duration::from_millis(batch_delay_ms)).await; + batch_delay_ms *= 2; + } + } else { + // Successfully saved, proceed to next batch + let last_slot = successful_blocks.keys().max().cloned().unwrap_or(0); + info!( + "Successfully saved batch to RocksDB. Last stored slot: {}", + last_slot + ); + break; + } + } else { + batch_retries += 1; + if batch_retries >= MAX_BATCH_RETRIES { + panic!( + "Failed to fetch all slots in batch after {} retries. 
Discarding batch.", + MAX_BATCH_RETRIES + ); + } else { + warn!( + "Retrying failed slots {}/{} after {} ms: {:?}", + batch_retries, MAX_BATCH_RETRIES, batch_delay_ms, new_failed_slots + ); + slots_to_fetch = new_failed_slots; + // Exponential backoff before retrying + tokio::time::sleep(Duration::from_millis(batch_delay_ms)).await; + batch_delay_ms *= 2; + } + } + } + } +} diff --git a/nft_ingester/src/bin/synchronizer/main.rs b/nft_ingester/src/bin/synchronizer/main.rs index 9ff1efab9..0f2dcda5b 100644 --- a/nft_ingester/src/bin/synchronizer/main.rs +++ b/nft_ingester/src/bin/synchronizer/main.rs @@ -52,16 +52,7 @@ pub async fn main() -> Result<(), IngesterError> { metrics.register(&mut registry); let red_metrics = Arc::new(metrics_utils::red::RequestErrorDurationMetrics::new()); red_metrics.register(&mut registry); - tokio::spawn(async move { - match setup_metrics(registry, config.metrics_port).await { - Ok(_) => { - tracing::info!("Setup metrics successfully") - } - Err(e) => { - tracing::error!("Setup metrics failed: {:?}", e) - } - } - }); + metrics_utils::utils::start_metrics(registry, config.metrics_port).await; let index_storage = Arc::new( init_index_storage_with_migration( @@ -102,6 +93,7 @@ pub async fn main() -> Result<(), IngesterError> { graceful_stop( cloned_tasks, shutdown_tx, + None, guard, config.profiling_file_path_container, &config.heap_path, diff --git a/nft_ingester/src/bin/synchronizer_utils/main.rs b/nft_ingester/src/bin/synchronizer_utils/main.rs new file mode 100644 index 000000000..435d300e1 --- /dev/null +++ b/nft_ingester/src/bin/synchronizer_utils/main.rs @@ -0,0 +1,146 @@ +use clap::Parser; +use itertools::Itertools; +use nft_ingester::error::IngesterError; +use rocks_db::asset::AssetCompleteDetails; +use rocks_db::asset_generated::asset as fb; +use rocks_db::column::TypedColumn; +use rocks_db::key_encoders::decode_u64x2_pubkey; +use rocks_db::migrator::MigrationState; +use rocks_db::storage_traits::AssetIndexReader; +use 
rocks_db::storage_traits::AssetUpdateIndexStorage; +use rocks_db::Storage; +use solana_sdk::pubkey::Pubkey; +use std::collections::HashSet; +use std::path::PathBuf; +use std::str::FromStr; +use std::sync::Arc; +use tokio::sync::Mutex; +use tokio::task::JoinSet; +use tracing::info; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + /// Path to the RocksDB instance with slots + #[arg(short, long)] + db_path: PathBuf, + + /// List of Pubkeys to fetch from the RPC and ingest into the RocksDB + #[arg(short, long, value_delimiter = ',', num_args = 0..)] + pubkeys_to_index: Option>, + + #[arg(short, long)] + index_after: Option, + + /// Base58-encoded owner public key to filter assets + #[arg(short, long)] + owner_pubkey: Option, + + #[arg(short, long, value_delimiter = ',', num_args = 0..)] + get_asset_maps_ids: Option>, +} + +#[tokio::main(flavor = "multi_thread")] +pub async fn main() -> Result<(), IngesterError> { + // Initialize tracing subscriber for logging + tracing_subscriber::fmt::init(); + info!("Starting sync util..."); + + let args = Args::parse(); + + let tx_storage_dir = tempfile::TempDir::new().unwrap(); + + let tasks = JoinSet::new(); + let mutexed_tasks = Arc::new(Mutex::new(tasks)); + let red_metrics = Arc::new(metrics_utils::red::RequestErrorDurationMetrics::new()); + + let storage = Storage::open_secondary( + &args.db_path, + &tx_storage_dir.path().to_path_buf(), + mutexed_tasks.clone(), + red_metrics.clone(), + MigrationState::Last, + ) + .unwrap(); + if let Some(index_after) = args.index_after { + // Decode the Base58 string back into Vec + let decoded_data = bs58::decode(&index_after) + .into_vec() + .expect("index after should be base58 encoded"); + let starting_key = decode_u64x2_pubkey(decoded_data).expect("Failed to decode index after"); + let (updated_keys, last_included_key) = storage + .fetch_asset_updated_keys(Some(starting_key), None, 500, None) + .unwrap(); + let index = storage + 
.get_asset_indexes(updated_keys.into_iter().collect_vec().as_slice()) + .await + .expect("Failed to get indexes"); + println!("{:?}", index); + } + if let Some(owner_pubkey) = args.owner_pubkey { + let owner_pubkey = Pubkey::from_str(&owner_pubkey).expect("Failed to parse owner pubkey"); + let owner_bytes = owner_pubkey.to_bytes(); + let mut matching_pubkeys = HashSet::new(); + let mut total_assets_processed = 0; + + let mut it = storage + .db + .raw_iterator_cf(&storage.db.cf_handle(AssetCompleteDetails::NAME).unwrap()); + it.seek_to_first(); + while it.valid() { + total_assets_processed += 1; + if let Some(value_bytes) = it.value() { + let data = fb::root_as_asset_complete_details(&value_bytes) + .expect("Failed to deserialize asset"); + + // Check if owner matches + if data + .owner() + .and_then(|o| o.owner()) + .and_then(|ow| ow.value()) + .filter(|owner| owner.bytes() == owner_bytes.as_slice()) + .is_some() + { + let asset: AssetCompleteDetails = AssetCompleteDetails::from(data); + println!("Matching asset: {:?}", asset); + matching_pubkeys.insert(asset.pubkey.clone()); + } + } + it.next(); + } + info!("Iteration completed."); + info!( + "Total assets processed: {}. 
Matches found: {}.", + total_assets_processed, + matching_pubkeys.len() + ); + + println!("Matching public keys:"); + for pubkey in &matching_pubkeys { + println!("{}", pubkey); + } + } + if let Some(pubkeys_to_index) = args.pubkeys_to_index { + let keys = pubkeys_to_index + .iter() + .map(|pk| Pubkey::from_str(pk).expect("invalid pubkey")) + .collect_vec(); + let index = storage + .get_asset_indexes(keys.as_slice()) + .await + .expect("Failed to get indexes"); + println!("{:?}", index); + } + if let Some(get_asset_maps_ids) = args.get_asset_maps_ids { + let keys = get_asset_maps_ids + .iter() + .map(|pk| Pubkey::from_str(pk).expect("invalid pubkey")) + .collect_vec(); + let maps = storage + .get_asset_selected_maps_async(keys, &None, &Default::default()) + .await + .expect("Failed to get asset selected maps"); + println!("{:?}", maps); + } + Ok(()) +} diff --git a/nft_ingester/src/cleaners/fork_cleaner.rs b/nft_ingester/src/cleaners/fork_cleaner.rs index f8d39c0ff..4176c8dc1 100644 --- a/nft_ingester/src/cleaners/fork_cleaner.rs +++ b/nft_ingester/src/cleaners/fork_cleaner.rs @@ -1,6 +1,7 @@ use entities::models::ForkedItem; use interface::fork_cleaner::{CompressedTreeChangesManager, ForkChecker}; use metrics_utils::ForkCleanerMetricsConfig; +use rocks_db::SlotStorage; use rocks_db::Storage; use solana_sdk::pubkey::Pubkey; use solana_sdk::signature::Signature; @@ -16,7 +17,7 @@ const CI_ITEMS_DELETE_BATCH_SIZE: usize = 100; const SLOT_CHECK_OFFSET: u64 = 1500; pub async fn run_fork_cleaner( - fork_cleaner: ForkCleaner, + fork_cleaner: ForkCleaner, metrics: Arc, mut rx: Receiver<()>, sequence_consistent_checker_wait_period_sec: u64, diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index 0300d81ae..633256a5e 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -152,16 +152,6 @@ pub struct IngesterConfig { #[serde(default = "default_rocks_backup_dir")] pub rocks_backup_dir: String, pub run_bubblegum_backfiller: bool, - 
#[serde(default = "default_dump_synchronizer_batch_size")] - pub dump_synchronizer_batch_size: usize, - #[serde(default = "default_dump_path")] - pub dump_path: String, - #[serde(default = "default_dump_sync_threshold")] - pub dump_sync_threshold: i64, - #[serde(default)] - pub run_dump_synchronize_on_start: bool, - #[serde(default)] - pub disable_synchronizer: bool, #[serde(default = "default_gapfiller_peer_addr")] pub gapfiller_peer_addr: String, pub peer_grpc_port: u16, @@ -280,6 +270,10 @@ pub struct SynchronizerConfig { pub heap_path: String, } +fn default_native_mint() -> String { + String::from("So11111111111111111111111111111111111111112") +} + #[derive(Deserialize, PartialEq, Debug, Clone)] pub struct ApiConfig { pub database_config: DatabaseConfig, @@ -314,6 +308,8 @@ pub struct ApiConfig { pub storage_service_base_url: Option, #[serde(default)] pub skip_check_tree_gaps: bool, + #[serde(default = "default_native_mint")] + pub native_mint_pubkey: String, } fn default_heap_path() -> String { diff --git a/nft_ingester/src/gapfiller.rs b/nft_ingester/src/gapfiller.rs index 15b54f0e6..e21a82714 100644 --- a/nft_ingester/src/gapfiller.rs +++ b/nft_ingester/src/gapfiller.rs @@ -109,39 +109,40 @@ pub async fn process_raw_blocks_stream( end_slot: u64, mut raw_blocks_consumer: impl RawBlocksConsumer, ) -> u64 { - let mut raw_blocks_streamer = match raw_blocks_consumer - .get_raw_blocks_consumable_stream_in_range(start_slot, end_slot) - .await - { - Ok(stream) => stream, - Err(e) => { - error!("Error consume raw blocks stream in range: {e}"); - return 0; - } - }; + // TODO: move to slot persister + // let mut raw_blocks_streamer = match raw_blocks_consumer + // .get_raw_blocks_consumable_stream_in_range(start_slot, end_slot) + // .await + // { + // Ok(stream) => stream, + // Err(e) => { + // error!("Error consume raw blocks stream in range: {e}"); + // return 0; + // } + // }; let mut processed_slots = 0; - while rx.is_empty() { - match 
raw_blocks_streamer.next().await { - Some(Ok(block)) => { - if let Some(e) = storage - .raw_blocks_cbor - .put_cbor_encoded(block.slot, block) - .await - .err() - { - error!("Error processing raw block: {e}") - } else { - processed_slots += 1; - } - } - Some(Err(e)) => { - error!("Error processing raw block stream item: {e}"); - } - None => return processed_slots, - } - } + // while rx.is_empty() { + // match raw_blocks_streamer.next().await { + // Some(Ok(block)) => { + // if let Some(e) = storage + // .raw_blocks_cbor + // .put_cbor_encoded(block.slot, block) + // .await + // .err() + // { + // error!("Error processing raw block: {e}") + // } else { + // processed_slots += 1; + // } + // } + // Some(Err(e)) => { + // error!("Error processing raw block stream item: {e}"); + // } + // None => return processed_slots, + // } + // } processed_slots } diff --git a/nft_ingester/src/index_syncronizer.rs b/nft_ingester/src/index_syncronizer.rs index a4196974a..e48c6a426 100644 --- a/nft_ingester/src/index_syncronizer.rs +++ b/nft_ingester/src/index_syncronizer.rs @@ -120,13 +120,8 @@ where asset_type: AssetType, ) -> Result { let last_indexed_key = self.index_storage.fetch_last_synced_id(asset_type).await?; - let last_indexed_key = match last_indexed_key { - Some(bytes) => { - let decoded_key = decode_u64x2_pubkey(bytes)?; - Some(decoded_key) - } - None => None, - }; + let last_indexed_key = last_indexed_key.map(decode_u64x2_pubkey).transpose()?; + // Fetch the last known key from the primary storage let last_key = match asset_type { AssetType::NonFungible => self.primary_storage.last_known_nft_asset_updated_key()?, @@ -147,6 +142,20 @@ where let last_known_seq = last_key.seq as i64; self.metrics .set_last_synchronized_slot("last_known_updated_seq", last_known_seq); + self.metrics + .set_last_synchronized_slot("last_known_updated_slot", last_key.slot as i64); + + self.metrics.set_last_synchronized_slot( + "last_synchronized_slot", + last_indexed_key + .as_ref() + .map(|k| 
k.slot) + .unwrap_or_default() as i64, + ); + self.metrics.set_last_synchronized_slot( + "last_synchronized_seq", + last_indexed_key.as_ref().map(|k| k.seq).unwrap_or_default() as i64, + ); if let Some(last_indexed_key) = &last_indexed_key { if last_indexed_key.seq >= last_key.seq { return Ok(SyncStatus::NoSyncRequired); @@ -513,7 +522,7 @@ where metrics: Arc, ) -> Result<(), IngesterError> { let asset_indexes = primary_storage - .get_nft_asset_indexes(updated_keys_refs, None) + .get_nft_asset_indexes(updated_keys_refs) .await?; if asset_indexes.is_empty() { @@ -522,13 +531,7 @@ where } index_storage - .update_nft_asset_indexes_batch( - asset_indexes - .values() - .cloned() - .collect::>() - .as_slice(), - ) + .update_nft_asset_indexes_batch(asset_indexes.as_slice()) .await?; metrics.inc_number_of_records_synchronized( "synchronized_records", @@ -554,11 +557,7 @@ where index_storage .update_fungible_asset_indexes_batch( - asset_indexes - .values() - .cloned() - .collect::>() - .as_slice(), + asset_indexes.as_slice(), ) .await?; metrics.inc_number_of_records_synchronized( @@ -580,7 +579,6 @@ mod tests { use mockall; use postgre_client::storage_traits::{MockAssetIndexStorageMock, MockTempClientProviderMock}; use rocks_db::storage_traits::MockAssetIndexStorage as MockPrimaryStorage; - use std::collections::HashMap; use tokio; fn create_test_asset_index(pubkey: &Pubkey) -> AssetIndex { @@ -709,18 +707,18 @@ mod tests { .once() .return_once(move |_, _, _, _| Ok((updated_keys.clone(), Some(index_clone)))); - let mut map_of_asset_indexes = HashMap::::new(); - map_of_asset_indexes.insert(key.clone(), create_test_asset_index(&key)); - let expected_indexes: Vec = map_of_asset_indexes.values().cloned().collect(); + let mut expected_indexes = Vec::::new(); + expected_indexes.push(create_test_asset_index(&key)); + let indexes_vec = expected_indexes.clone(); primary_storage .mock_asset_index_reader .expect_get_nft_asset_indexes() .once() - .return_once(move |_, _| 
Ok(map_of_asset_indexes)); - + .return_once(move |_| Ok(indexes_vec)); + let indexes_vec = expected_indexes.clone(); index_storage .expect_update_nft_asset_indexes_batch() - .with(mockall::predicate::eq(expected_indexes.clone())) + .with(mockall::predicate::eq(indexes_vec)) .once() .return_once(|_| Ok(())); index_storage @@ -813,14 +811,14 @@ mod tests { } }); - let mut map_of_asset_indexes = HashMap::::new(); - map_of_asset_indexes.insert(key.clone(), create_test_asset_index(&key)); - let expected_indexes: Vec = map_of_asset_indexes.values().cloned().collect(); + let mut expected_indexes = Vec::::new(); + expected_indexes.push(create_test_asset_index(&key)); + let indexes_vec = expected_indexes.clone(); primary_storage .mock_asset_index_reader .expect_get_nft_asset_indexes() .once() - .return_once(move |_, _| Ok(map_of_asset_indexes)); + .return_once(move |_| Ok(indexes_vec)); index_storage .expect_update_nft_asset_indexes_batch() @@ -936,25 +934,22 @@ mod tests { } }); - let mut map_of_asset_indexes = HashMap::::new(); - map_of_asset_indexes.insert(key.clone(), create_test_asset_index(&key)); - let expected_indexes_first_batch: Vec = - map_of_asset_indexes.values().cloned().collect(); - - let expected_indexes_second_batch: Vec = - map_of_asset_indexes.values().cloned().collect(); - let second_call_map = map_of_asset_indexes.clone(); + let mut expected_indexes_first_batch = Vec::::new(); + expected_indexes_first_batch.push(create_test_asset_index(&key)); + let indexes_first_batch_vec = expected_indexes_first_batch.clone(); + let indexes_second_batch_vec = expected_indexes_first_batch.clone(); + let expected_indexes_second_batch: Vec = expected_indexes_first_batch.clone(); let mut call_count2 = 0; primary_storage .mock_asset_index_reader .expect_get_nft_asset_indexes() .times(2) - .returning(move |_, _| { + .returning(move |_| { call_count2 += 1; if call_count2 == 1 { - Ok(map_of_asset_indexes.clone()) + Ok(indexes_first_batch_vec.clone()) } else { - 
Ok(second_call_map.clone()) + Ok(indexes_second_batch_vec.clone()) } }); diff --git a/nft_ingester/src/init.rs b/nft_ingester/src/init.rs index 4852711f6..09c429fd7 100644 --- a/nft_ingester/src/init.rs +++ b/nft_ingester/src/init.rs @@ -16,6 +16,7 @@ use tokio::process::Command; use tokio::sync::broadcast::Sender; use tokio::sync::Mutex; use tokio::task::{JoinError, JoinSet}; +use tokio_util::sync::CancellationToken; use tracing::error; const MALLOC_CONF_ENV: &str = "MALLOC_CONF"; @@ -85,12 +86,16 @@ pub async fn init_primary_storage( pub async fn graceful_stop( tasks: Arc>>>, shutdown_tx: Sender<()>, + shutdown_token: Option, guard: Option>, profile_path: Option, heap_path: &str, ) { usecase::graceful_stop::listen_shutdown().await; let _ = shutdown_tx.send(()); + if let Some(token) = shutdown_token { + token.cancel(); + } if let Some(guard) = guard { if let Ok(report) = guard.report().build() { diff --git a/nft_ingester/src/json_worker.rs b/nft_ingester/src/json_worker.rs index d4778c093..953ebb886 100644 --- a/nft_ingester/src/json_worker.rs +++ b/nft_ingester/src/json_worker.rs @@ -4,13 +4,15 @@ use async_trait::async_trait; use entities::enums::TaskStatus; use entities::models::{JsonDownloadTask, OffChainData}; use interface::error::JsonDownloaderError; -use interface::json::{JsonDownloader, JsonPersister}; +use interface::json::{JsonDownloadResult, JsonDownloader, JsonPersister}; +use metrics_utils::red::RequestErrorDurationMetrics; use metrics_utils::{JsonDownloaderMetricsConfig, MetricStatus}; use postgre_client::tasks::UpdatedTask; use postgre_client::PgClient; use reqwest::{Client, ClientBuilder}; use rocks_db::asset_previews::UrlToDownload; use rocks_db::Storage; +use serde_json::Value; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::broadcast::Receiver; @@ -19,8 +21,8 @@ use tokio::sync::{mpsc, Mutex}; use tokio::task::JoinSet; use tokio::time::{self, Duration, Instant}; use tracing::{debug, error}; +use url::Url; -pub const 
JSON_CONTENT_TYPE: &str = "application/json"; pub const JSON_BATCH: usize = 300; pub const WIPE_PERIOD_SEC: u64 = 60; pub const SLEEP_TIME: u64 = 1; @@ -31,6 +33,7 @@ pub struct JsonWorker { pub rocks_db: Arc, pub num_of_parallel_workers: i32, pub metrics: Arc, + pub red_metrics: Arc, } impl JsonWorker { @@ -38,6 +41,7 @@ impl JsonWorker { db_client: Arc, rocks_db: Arc, metrics: Arc, + red_metrics: Arc, ) -> Self { let config: IngesterConfig = setup_config(INGESTER_CONFIG_PREFIX); @@ -45,6 +49,7 @@ impl JsonWorker { db_client, num_of_parallel_workers: config.parallel_json_downloaders, metrics, + red_metrics, rocks_db, } } @@ -107,13 +112,17 @@ impl TasksStreamer { pub struct TasksPersister { pub persister: Arc, - pub receiver: tokio::sync::mpsc::Receiver<(String, Result)>, + pub receiver: + tokio::sync::mpsc::Receiver<(String, Result)>, } impl TasksPersister { pub fn new( persister: Arc, - receiver: tokio::sync::mpsc::Receiver<(String, Result)>, + receiver: tokio::sync::mpsc::Receiver<( + String, + Result, + )>, ) -> Self { Self { persister, @@ -257,47 +266,97 @@ pub async fn run(json_downloader: Arc, rx: Receiver<()>) { #[async_trait] impl JsonDownloader for JsonWorker { - async fn download_file(&self, url: String) -> Result { + async fn download_file(&self, url: String) -> Result { + let start_time = chrono::Utc::now(); let client = ClientBuilder::new() .timeout(time::Duration::from_secs(CLIENT_TIMEOUT)) .build() .map_err(|e| { JsonDownloaderError::ErrorDownloading(format!("Failed to create client: {:?}", e)) })?; - let response = Client::get(&client, url) + + // Detect if the URL is an IPFS link + let parsed_url = if url.starts_with("ipfs://") { + // Extract the IPFS hash or path + let ipfs_path = url.trim_start_matches("ipfs://"); + // Choose an IPFS gateway (you can change this to your preferred gateway) + let gateway_url = format!("https://ipfs.io/ipfs/{}", ipfs_path); + // Parse the rewritten URL + let parsed_url = Url::parse(&gateway_url).map_err(|e| { + 
JsonDownloaderError::ErrorDownloading(format!("Failed to parse IPFS URL: {:?}", e)) + })?; + parsed_url + } else { + // Parse the original URL + let parsed_url = Url::parse(&url).map_err(|e| { + JsonDownloaderError::ErrorDownloading(format!("Failed to parse URL: {:?}", e)) + })?; + parsed_url + }; + + let host = parsed_url.host_str().unwrap_or("no_host"); + + let response = client + .get(parsed_url.clone()) .send() .await .map_err(|e| format!("Failed to make request: {:?}", e)); match response { Ok(response) => { - if let Some(content_header) = response.headers().get("Content-Type") { - match content_header.to_str() { - Ok(header) => { - if !header.contains(JSON_CONTENT_TYPE) { - return Err(JsonDownloaderError::GotNotJsonFile); - } - } - Err(_) => { - return Err(JsonDownloaderError::CouldNotReadHeader); - } - } - } - + self.red_metrics.observe_request( + "json_downloader", + "download_file", + host, + start_time, + ); if response.status() != reqwest::StatusCode::OK { return Err(JsonDownloaderError::ErrorStatusCode( response.status().as_str().to_string(), )); - } else { - let metadata_body = response.text().await; - if let Ok(metadata) = metadata_body { - return Ok(metadata.trim().replace('\0', "")); + } + + // Get the Content-Type header + let content_type = response + .headers() + .get("Content-Type") + .and_then(|ct| ct.to_str().ok()) + .unwrap_or(""); + + // Excluded content types that are definitely not JSON + let excluded_types = ["audio/", "application/octet-stream"]; + if excluded_types.iter().any(|&t| content_type.starts_with(t)) { + return Err(JsonDownloaderError::GotNotJsonFile); + } + + // Check if the content type is image or video + if content_type.starts_with("image/") || content_type.starts_with("video/") { + // Return the URL and MIME type + return Ok(JsonDownloadResult::MediaUrlAndMimeType { + url: url.clone(), + mime_type: content_type.to_string(), + }); + } + + let metadata_body = response.text().await; + if let Ok(metadata) = metadata_body { + 
// Attempt to parse the response as JSON + if serde_json::from_str::(&metadata).is_ok() { + return Ok(JsonDownloadResult::JsonContent( + metadata.trim().replace('\0', ""), + )); } else { - Err(JsonDownloaderError::CouldNotDeserialize) + return Err(JsonDownloaderError::CouldNotDeserialize); } + } else { + Err(JsonDownloaderError::CouldNotDeserialize) } } - Err(e) => Err(JsonDownloaderError::ErrorDownloading(e.to_string())), + Err(e) => { + self.red_metrics + .observe_error("json_downloader", "download_file", host); + Err(JsonDownloaderError::ErrorDownloading(e.to_string())) + } } } } @@ -306,14 +365,14 @@ impl JsonDownloader for JsonWorker { impl JsonPersister for JsonWorker { async fn persist_response( &self, - results: Vec<(String, Result)>, + results: Vec<(String, Result)>, ) -> Result<(), JsonDownloaderError> { let mut pg_updates = Vec::new(); let mut rocks_updates = HashMap::new(); for (metadata_url, result) in results.iter() { - match &result { - Ok(json_file) => { + match result { + Ok(JsonDownloadResult::JsonContent(json_file)) => { rocks_updates.insert( metadata_url.clone(), OffChainData { @@ -329,7 +388,27 @@ impl JsonPersister for JsonWorker { self.metrics.inc_tasks("json", MetricStatus::SUCCESS); } + Ok(JsonDownloadResult::MediaUrlAndMimeType { url, mime_type }) => { + pg_updates.push(UpdatedTask { + status: TaskStatus::Success, + metadata_url: metadata_url.clone(), + error: "".to_string(), + }); + rocks_updates.insert( + metadata_url.clone(), + OffChainData { + url: metadata_url.clone(), + metadata: format!( + "{{\"image\":\"{}\",\"type\":\"{}\"}}", + url, mime_type + ) + .to_string(), + }, + ); + self.metrics.inc_tasks("media", MetricStatus::SUCCESS); + } Err(json_err) => match json_err { + // TODO: this is bullshit, we should handle this in a different way - it's not success JsonDownloaderError::GotNotJsonFile => { pg_updates.push(UpdatedTask { status: TaskStatus::Success, @@ -373,15 +452,14 @@ impl JsonPersister for JsonWorker { } 
JsonDownloaderError::ErrorDownloading(err) => { self.metrics.inc_tasks("unknown", MetricStatus::FAILURE); - // back to pending status to try again - // until attempts reach its maximum + // Revert to pending status to retry until max attempts pg_updates.push(UpdatedTask { status: TaskStatus::Pending, metadata_url: metadata_url.clone(), error: err.clone(), }); } - _ => {} // intentionally empty because nothing to process + _ => {} // No additional processing needed }, } } diff --git a/nft_ingester/src/message_parser.rs b/nft_ingester/src/message_parser.rs index 8659ed757..74e7995ea 100644 --- a/nft_ingester/src/message_parser.rs +++ b/nft_ingester/src/message_parser.rs @@ -61,7 +61,7 @@ impl MessageParser { } } - pub(crate) fn parse_transaction( + pub fn parse_transaction( &self, data: Vec, map_flatbuffer: bool, diff --git a/nft_ingester/src/processors/account_based/mpl_core_processor.rs b/nft_ingester/src/processors/account_based/mpl_core_processor.rs index 4a599baf8..0b3f8bb81 100644 --- a/nft_ingester/src/processors/account_based/mpl_core_processor.rs +++ b/nft_ingester/src/processors/account_based/mpl_core_processor.rs @@ -7,13 +7,14 @@ use entities::models::{ }; use heck::ToSnakeCase; use metrics_utils::IngesterMetricsConfig; -use rocks_db::asset::AssetCollection; +use rocks_db::asset::{AssetCollection, AssetCompleteDetails}; use rocks_db::batch_savers::{BatchSaveStorage, MetadataModels}; use rocks_db::errors::StorageError; use rocks_db::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails}; use serde_json::Map; use serde_json::{json, Value}; use solana_program::pubkey::Pubkey; +use std::collections::BTreeMap; use std::sync::Arc; use std::time::Instant; use usecase::save_metrics::result_to_metrics; @@ -41,7 +42,8 @@ impl MplCoreProcessor { }; let begin_processing = Instant::now(); - let res = storage.store_metadata_models(&metadata_models); + let asset = AssetCompleteDetails::from(&metadata_models); + let res = 
storage.store_metadata_models(&asset, metadata_models.metadata_mint); result_to_metrics( self.metrics.clone(), &res, @@ -75,9 +77,9 @@ impl MplCoreProcessor { // If it is an `Address` type, use the value directly. If it is a `Collection`, search for and // use the collection's authority. let update_authority = match asset.update_authority { - UpdateAuthority::Address(address) => address, + UpdateAuthority::Address(address) => Some(address), UpdateAuthority::Collection(address) => storage.get_authority(address), - UpdateAuthority::None => Pubkey::default(), + UpdateAuthority::None => None, }; let name = asset.name.clone(); @@ -112,7 +114,7 @@ impl MplCoreProcessor { let (owner, class) = match account_data.indexable_asset.clone() { MplCoreAccountData::Asset(_) => (asset.owner, SpecificationAssetClass::MplCoreAsset), MplCoreAccountData::Collection(_) => ( - Some(update_authority), + update_authority, SpecificationAssetClass::MplCoreCollection, ), _ => return Ok(None), @@ -132,7 +134,14 @@ impl MplCoreProcessor { }) .unwrap_or((0, &default_creators)); - let mut plugins_json = serde_json::to_value(&asset.plugins) + // convert HashMap plugins into BTreeMap to have always same plugins order + // for example without ordering 2 assets with same plugins can have different order saved in DB + // it affects only API response and tests + let ordered_plugins: BTreeMap<_, _> = asset.plugins + .iter() + .map(|(key, value)| (format!("{:?}", key), value)) + .collect(); + let mut plugins_json = serde_json::to_value(&ordered_plugins) .map_err(|e| IngesterError::DeserializationError(e.to_string()))?; // Improve JSON output. 
@@ -185,7 +194,7 @@ impl MplCoreProcessor { .get(&PluginType::TransferDelegate) .and_then(|plugin_schema| match &plugin_schema.authority { PluginAuthority::Owner => owner, - PluginAuthority::UpdateAuthority => Some(update_authority), + PluginAuthority::UpdateAuthority => update_authority, PluginAuthority::Address { address } => Some(*address), PluginAuthority::None => None, }); @@ -209,7 +218,7 @@ impl MplCoreProcessor { account_data.indexable_asset, MplCoreAccountData::Collection(_) ) { - Some(update_authority) + update_authority } else { None }; @@ -253,12 +262,16 @@ impl MplCoreProcessor { created_at: account_data.slot_updated as i64, edition_address: None, }); - models.asset_authority = Some(AssetAuthority { - pubkey: asset_key, - authority: update_authority, - slot_updated: account_data.slot_updated, - write_version: Some(account_data.write_version), - }); + if let Some(upd_auth) = update_authority { + models.asset_authority = Some(AssetAuthority { + pubkey: asset_key, + authority: upd_auth, + slot_updated: account_data.slot_updated, + write_version: Some(account_data.write_version), + }); + } else { + models.asset_authority = None; + } models.asset_owner = Some(AssetOwner { pubkey: asset_key, owner: Updated::new( @@ -277,6 +290,11 @@ impl MplCoreProcessor { Some(UpdateVersion::WriteVersion(account_data.write_version)), ownership_type, ), + is_current_owner: Updated::new( + account_data.slot_updated, + Some(UpdateVersion::WriteVersion(account_data.write_version)), + true, + ), ..Default::default() }); models.asset_dynamic = Some(AssetDynamicDetails { @@ -312,11 +330,8 @@ impl MplCoreProcessor { Some(UpdateVersion::WriteVersion(account_data.write_version)), false, ), - was_decompressed: Updated::new( - account_data.slot_updated, - Some(UpdateVersion::WriteVersion(account_data.write_version)), - false, - ), + // should not set this value for Core assets + was_decompressed: None, onchain_data: Some(Updated::new( account_data.slot_updated, 
Some(UpdateVersion::WriteVersion(account_data.write_version)), diff --git a/nft_ingester/src/processors/account_based/mplx_updates_processor.rs b/nft_ingester/src/processors/account_based/mplx_updates_processor.rs index f5dd04af3..8967bc12c 100644 --- a/nft_ingester/src/processors/account_based/mplx_updates_processor.rs +++ b/nft_ingester/src/processors/account_based/mplx_updates_processor.rs @@ -13,7 +13,8 @@ use entities::models::{BurntMetadataSlot, MetadataInfo, Updated}; use entities::models::{ChainDataV1, Creator, UpdateVersion, Uses}; use metrics_utils::IngesterMetricsConfig; use rocks_db::asset::{ - AssetAuthority, AssetCollection, AssetDynamicDetails, AssetStaticDetails, MetadataMintMap, + AssetAuthority, AssetCollection, AssetCompleteDetails, AssetDynamicDetails, AssetStaticDetails, + MetadataMintMap, }; use rocks_db::batch_savers::{BatchSaveStorage, MetadataModels}; use rocks_db::errors::StorageError; @@ -54,7 +55,8 @@ impl MplxAccountsProcessor { let metadata_models = self.create_rocks_metadata_models(key, metadata_info); let begin_processing = Instant::now(); - let res = storage.store_metadata_models(&metadata_models); + let asset = AssetCompleteDetails::from(&metadata_models); + let res = storage.store_metadata_models(&asset, metadata_models.metadata_mint); result_to_metrics( self.metrics.clone(), &res, @@ -159,11 +161,8 @@ impl MplxAccountsProcessor { false, ), seq: None, - was_decompressed: Updated::new( - metadata_info.slot_updated, - Some(UpdateVersion::WriteVersion(metadata_info.write_version)), - false, - ), + // should not set this value for regular NFT updates + was_decompressed: None, onchain_data: Some(Updated::new( metadata_info.slot_updated, Some(UpdateVersion::WriteVersion(metadata_info.write_version)), diff --git a/nft_ingester/src/processors/account_based/token_updates_processor.rs b/nft_ingester/src/processors/account_based/token_updates_processor.rs index 21fb80695..7f4af0257 100644 --- 
a/nft_ingester/src/processors/account_based/token_updates_processor.rs +++ b/nft_ingester/src/processors/account_based/token_updates_processor.rs @@ -1,12 +1,14 @@ -use entities::enums::OwnerType; +use entities::enums::{OwnerType, SpecificationAssetClass}; use entities::models::{Mint, TokenAccount, UpdateVersion, Updated}; use metrics_utils::IngesterMetricsConfig; -use rocks_db::asset::{AssetDynamicDetails, AssetOwner}; +use rocks_db::asset::{AssetCompleteDetails, AssetDynamicDetails, AssetOwner}; use rocks_db::batch_savers::BatchSaveStorage; use rocks_db::errors::StorageError; +use rocks_db::AssetStaticDetails; use solana_program::pubkey::Pubkey; use std::sync::Arc; use tokio::time::Instant; +use usecase::response_prettier::filter_non_null_fields; use usecase::save_metrics::result_to_metrics; pub struct TokenAccountsProcessor { @@ -58,7 +60,7 @@ impl TokenAccountsProcessor { ) -> Result<(), StorageError> { self.save_token_account_with_idxs(storage, key, token_account)?; let asset_owner_details = AssetOwner { - pubkey: token_account.mint, + pubkey: token_account.pubkey, owner: Updated::new( token_account.slot_updated as u64, Some(UpdateVersion::WriteVersion(token_account.write_version)), @@ -75,6 +77,11 @@ impl TokenAccountsProcessor { Some(UpdateVersion::WriteVersion(token_account.write_version)), None, ), + is_current_owner: Updated::new( + token_account.slot_updated as u64, + Some(UpdateVersion::WriteVersion(token_account.write_version)), + token_account.amount == 1, + ), }; let asset_dynamic_details = AssetDynamicDetails { pubkey: token_account.mint, @@ -89,8 +96,13 @@ impl TokenAccountsProcessor { self.finalize_processing( storage, |storage: &mut BatchSaveStorage| { - storage.store_owner(&asset_owner_details)?; - storage.store_dynamic(&asset_dynamic_details) + let asset = &AssetCompleteDetails { + pubkey: token_account.mint, + owner: Some(asset_owner_details.clone()), + dynamic_details: Some(asset_dynamic_details.clone()), + ..Default::default() + }; + 
storage.store_complete(asset) }, "token_accounts_asset_components_merge_with_batch", )?; @@ -107,6 +119,13 @@ impl TokenAccountsProcessor { storage: &mut BatchSaveStorage, mint: &Mint, ) -> Result<(), StorageError> { + let asset_static_details = mint.extensions.as_ref().map(|_| AssetStaticDetails { + pubkey: mint.pubkey, + specification_asset_class: SpecificationAssetClass::FungibleToken, + created_at: mint.slot_updated, + royalty_target_type: entities::enums::RoyaltyTargetType::Creators, + edition_address: None, + }); let mint_extensions = mint .extensions .as_ref() @@ -114,6 +133,17 @@ impl TokenAccountsProcessor { serde_json::to_value(extensions).map_err(|e| StorageError::Common(e.to_string())) }) .transpose()?; + let metadata = mint + .extensions + .as_ref() + .and_then(|extensions| extensions.metadata.clone()); + let metadata_json = metadata + .as_ref() + .map(|metadata| { + serde_json::to_value(metadata.clone()) + .map_err(|e| StorageError::Common(e.to_string())) + }) + .transpose()?; let asset_dynamic_details = AssetDynamicDetails { pubkey: mint.pubkey, supply: Some(Updated::new( @@ -121,28 +151,44 @@ impl TokenAccountsProcessor { Some(UpdateVersion::WriteVersion(mint.write_version)), mint.supply as u64, )), - mint_extensions: mint_extensions.map(|mint_extensions| { + mint_extensions: filter_non_null_fields(mint_extensions.as_ref()).map( + |mint_extensions| { + Updated::new( + mint.slot_updated as u64, + Some(UpdateVersion::WriteVersion(mint.write_version)), + mint_extensions.to_string(), + ) + }, + ), + url: metadata + .as_ref() + .map(|metadata| { + Updated::new( + mint.slot_updated as u64, + Some(UpdateVersion::WriteVersion(mint.write_version)), + metadata.uri.clone(), + ) + }) + .unwrap_or_default(), + onchain_data: metadata_json.map(|metadata_json| { Updated::new( mint.slot_updated as u64, Some(UpdateVersion::WriteVersion(mint.write_version)), - mint_extensions.to_string(), + metadata_json.to_string(), ) }), - url: mint - .extensions - .as_ref() - 
.and_then(|extensions| { - extensions.metadata.as_ref().map(|metadata| { - Updated::new( - mint.slot_updated as u64, - Some(UpdateVersion::WriteVersion(mint.write_version)), - metadata.uri.clone(), - ) - }) - }) - .unwrap_or_default(), + raw_name: metadata.map(|metadata| { + Updated::new( + mint.slot_updated as u64, + Some(UpdateVersion::WriteVersion(mint.write_version)), + metadata.name.clone(), + ) + }), + // TODO: raw_symbol + // raw_symbol: ActiveValue::Set(Some(metadata.symbol.clone().into_bytes().to_vec())), ..Default::default() }; + let owner_type_value = if mint.supply > 1 { OwnerType::Token } else { @@ -161,8 +207,14 @@ impl TokenAccountsProcessor { self.finalize_processing( storage, |storage: &mut BatchSaveStorage| { - storage.store_owner(&asset_owner_details)?; - storage.store_dynamic(&asset_dynamic_details)?; + let asset = &AssetCompleteDetails { + pubkey: mint.pubkey, + static_details: asset_static_details.clone(), + owner: Some(asset_owner_details.clone()), + dynamic_details: Some(asset_dynamic_details.clone()), + ..Default::default() + }; + storage.store_complete(asset)?; storage.store_spl_mint(mint) }, "mint_accounts_merge_with_batch", diff --git a/nft_ingester/src/processors/accounts_processor.rs b/nft_ingester/src/processors/accounts_processor.rs index a632ff20b..f31bf7c7e 100644 --- a/nft_ingester/src/processors/accounts_processor.rs +++ b/nft_ingester/src/processors/accounts_processor.rs @@ -180,7 +180,7 @@ impl AccountsProcessor { .await; } - async fn process_account( + pub async fn process_account( &self, batch_storage: &mut BatchSaveStorage, unprocessed_accounts: Vec, diff --git a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs index 33aa631f5..da2571826 100644 --- a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs +++ b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs @@ -15,8 
+15,7 @@ use entities::enums::{ SpecificationAssetClass, TokenStandard, UseMethod, }; use entities::models::{ - BatchMintToVerify, BufferedTransaction, OffChainData, SignatureWithSlot, Task, UpdateVersion, - Updated, + BatchMintToVerify, BufferedTransaction, OffChainData, SignatureWithSlot, UpdateVersion, Updated, }; use entities::models::{ChainDataV1, Creator, Uses}; use lazy_static::lazy_static; @@ -40,7 +39,6 @@ use solana_sdk::signature::Signature; use std::collections::{HashSet, VecDeque}; use std::str::FromStr; use std::sync::Arc; -use tokio::sync::Mutex; use tokio::time::Instant; use tracing::{debug, error}; use usecase::save_metrics::result_to_metrics; @@ -61,21 +59,15 @@ pub struct BubblegumTxProcessor { pub instruction_parser: Arc, pub rocks_client: Arc, - pub json_tasks: Arc>>, pub metrics: Arc, } impl BubblegumTxProcessor { - pub fn new( - rocks_client: Arc, - metrics: Arc, - json_tasks: Arc>>, - ) -> Self { + pub fn new(rocks_client: Arc, metrics: Arc) -> Self { BubblegumTxProcessor { transaction_parser: Arc::new(FlatbufferMapper {}), instruction_parser: Arc::new(BubblegumParser {}), rocks_client, - json_tasks, metrics, } } @@ -408,6 +400,11 @@ impl BubblegumTxProcessor { Some(UpdateVersion::Sequence(cl.seq)), Some(cl.seq), ), + is_current_owner: Updated::new( + bundle.slot, + Some(UpdateVersion::Sequence(cl.seq)), + true, + ), }; let asset_update = AssetUpdateEvent { update: Some(AssetDynamicUpdate { @@ -651,6 +648,11 @@ impl BubblegumTxProcessor { Some(UpdateVersion::Sequence(cl.seq)), Some(cl.seq), ), + is_current_owner: Updated::new( + slot, + Some(UpdateVersion::Sequence(cl.seq)), + true, + ), }; asset_update.owner_update = Some(AssetUpdate { pk: id, @@ -740,9 +742,10 @@ impl BubblegumTxProcessor { pk: *asset_id, details: AssetDynamicDetails { pubkey: *asset_id, - was_decompressed: Updated::new(bundle.slot, None, true), + was_decompressed: Some(Updated::new(bundle.slot, None, true)), is_compressible: Updated::new(bundle.slot, None, false), supply: 
Some(Updated::new(bundle.slot, None, 1)), + seq: Some(Updated::new(bundle.slot, None, 0)), ..Default::default() }, } @@ -855,6 +858,11 @@ impl BubblegumTxProcessor { Some(UpdateVersion::Sequence(cl.seq)), Some(cl.seq), ), + is_current_owner: Updated::new( + bundle.slot, + Some(UpdateVersion::Sequence(cl.seq)), + true, + ), }; asset_update.owner_update = Some(AssetUpdate { pk: id, diff --git a/nft_ingester/src/raydium_price_fetcher.rs b/nft_ingester/src/raydium_price_fetcher.rs index 0f5a48b88..6dadf7363 100644 --- a/nft_ingester/src/raydium_price_fetcher.rs +++ b/nft_ingester/src/raydium_price_fetcher.rs @@ -2,39 +2,57 @@ use crate::error::IngesterError; use async_trait::async_trait; use interface::error::UsecaseError; use interface::price_fetcher::TokenPriceFetcher; +use metrics_utils::red::RequestErrorDurationMetrics; use moka::future::Cache; use solana_program::pubkey::Pubkey; use std::collections::HashMap; +use std::sync::Arc; -const CACHE_TTL: std::time::Duration = std::time::Duration::from_secs(60); +pub const CACHE_TTL: std::time::Duration = std::time::Duration::from_secs(60); pub struct RaydiumTokenPriceFetcher { host: String, price_cache: Cache, symbol_cache: Cache, + red_metrics: Option>, } impl Default for RaydiumTokenPriceFetcher { fn default() -> Self { - Self::new("https://api-v3.raydium.io".to_string(), CACHE_TTL) + Self::new("https://api-v3.raydium.io".to_string(), CACHE_TTL, None) } } impl RaydiumTokenPriceFetcher { - pub fn new(host: String, ttl: std::time::Duration) -> Self { + pub fn new( + host: String, + ttl: std::time::Duration, + red_metrics: Option>, + ) -> Self { Self { host, price_cache: Cache::builder().time_to_live(ttl).build(), symbol_cache: Cache::builder().time_to_live(ttl).build(), + red_metrics, } } async fn get(&self, endpoint: &str) -> Result { - reqwest::get(format!("{host}/{ep}", host = self.host, ep = endpoint)) + let start_time = chrono::Utc::now(); + let response = reqwest::get(format!("{host}/{ep}", host = self.host, ep = 
endpoint)) .await? .json() .await - .map_err(Into::into) + .map_err(Into::into); + if let Some(red_metrics) = &self.red_metrics { + // cut the part after ? in the endpoint for metrics + let endpoint = endpoint.split('?').next().unwrap_or(endpoint); + match &response { + Ok(_) => red_metrics.observe_request("raydium", "get", endpoint, start_time), + Err(_) => red_metrics.observe_error("raydium", "get", endpoint), + } + } + response } } @@ -42,7 +60,7 @@ impl RaydiumTokenPriceFetcher { impl TokenPriceFetcher for RaydiumTokenPriceFetcher { async fn fetch_token_symbols( &self, - token_ids: &[Pubkey], + token_ids: &[String], ) -> Result, UsecaseError> { let token_ids_str: Vec = token_ids.iter().map(ToString::to_string).collect(); let mut result = HashMap::with_capacity(token_ids.len()); @@ -93,7 +111,7 @@ impl TokenPriceFetcher for RaydiumTokenPriceFetcher { async fn fetch_token_prices( &self, - token_ids: &[Pubkey], + token_ids: &[String], ) -> Result, UsecaseError> { let token_ids_str: Vec = token_ids.iter().map(ToString::to_string).collect(); let mut result = HashMap::with_capacity(token_ids.len()); diff --git a/nft_ingester/tests/api_tests.rs b/nft_ingester/tests/api_tests.rs index fccea61c5..dd9edf34b 100644 --- a/nft_ingester/tests/api_tests.rs +++ b/nft_ingester/tests/api_tests.rs @@ -7,6 +7,7 @@ mod tests { ShadowInterestBearingConfig, ShadowTransferFee, ShadowTransferFeeConfig, UnixTimestamp, }; use blockbuster::programs::token_extensions::MintAccountExtensions; + use rocks_db::column::TypedColumn; use nft_ingester::cleaners::indexer_cleaner::clean_syncronized_idxs; use std::str::FromStr; @@ -51,7 +52,7 @@ mod tests { config::JsonMiddlewareConfig, json_worker::JsonWorker, processors::account_based::token_updates_processor::TokenAccountsProcessor, }; - use rocks_db::asset::AssetLeaf; + use rocks_db::asset::{AssetCompleteDetails, AssetLeaf}; use rocks_db::batch_savers::BatchSaveStorage; use rocks_db::inscriptions::{Inscription, InscriptionData}; use 
rocks_db::tree_seq::TreesGaps; @@ -605,25 +606,26 @@ mod tests { .put(metadata.url.clone(), metadata) .unwrap(); + let asset_complete_details = AssetCompleteDetails { + pubkey: pb, + static_details: Some(asset_static_details), + dynamic_details: Some(dynamic_details), + authority: Some(asset_authority), + owner: Some(owner), + ..Default::default() + }; env.rocks_env .storage - .asset_static_data - .put(pb, asset_static_details) - .unwrap(); - env.rocks_env - .storage - .asset_dynamic_data - .put(pb, dynamic_details) - .unwrap(); - env.rocks_env - .storage - .asset_authority_data - .put(pb, asset_authority) - .unwrap(); - env.rocks_env - .storage - .asset_owner_data - .put(pb, owner) + .db + .put_cf( + &env.rocks_env + .storage + .db + .cf_handle(AssetCompleteDetails::NAME) + .unwrap(), + pb, + asset_complete_details.convert_to_fb_bytes(), + ) .unwrap(); let payload = GetAsset { @@ -729,26 +731,26 @@ mod tests { owner_type: Updated::new(12, Some(UpdateVersion::Sequence(12)), OwnerType::Single), owner_delegate_seq: Updated::new(12, Some(UpdateVersion::Sequence(12)), Some(12)), }; - - env.rocks_env - .storage - .asset_static_data - .put(pb, asset_static_details) - .unwrap(); - env.rocks_env - .storage - .asset_dynamic_data - .put(pb, dynamic_details) - .unwrap(); - env.rocks_env - .storage - .asset_authority_data - .put(pb, asset_authority) - .unwrap(); + let asset_complete_details = AssetCompleteDetails { + pubkey: pb, + static_details: Some(asset_static_details), + dynamic_details: Some(dynamic_details), + authority: Some(asset_authority), + owner: Some(owner), + ..Default::default() + }; env.rocks_env .storage - .asset_owner_data - .put(pb, owner) + .db + .put_cf( + &env.rocks_env + .storage + .db + .cf_handle(AssetCompleteDetails::NAME) + .unwrap(), + pb, + asset_complete_details.convert_to_fb_bytes(), + ) .unwrap(); let payload = GetAsset { @@ -2352,26 +2354,26 @@ mod tests { owner_type: Updated::new(12, Some(UpdateVersion::Sequence(12)), OwnerType::Single), 
owner_delegate_seq: Updated::new(12, Some(UpdateVersion::Sequence(12)), Some(12)), }; - - env.rocks_env - .storage - .asset_static_data - .put(pb, asset_static_details) - .unwrap(); - env.rocks_env - .storage - .asset_dynamic_data - .put(pb, dynamic_details) - .unwrap(); - env.rocks_env - .storage - .asset_authority_data - .put(pb, asset_authority) - .unwrap(); + let asset_complete_details = AssetCompleteDetails { + pubkey: pb, + static_details: Some(asset_static_details), + dynamic_details: Some(dynamic_details), + authority: Some(asset_authority), + owner: Some(owner), + ..Default::default() + }; env.rocks_env .storage - .asset_owner_data - .put(pb, owner) + .db + .put_cf( + &env.rocks_env + .storage + .db + .cf_handle(AssetCompleteDetails::NAME) + .unwrap(), + pb, + asset_complete_details.convert_to_fb_bytes(), + ) .unwrap(); let payload = GetAsset { @@ -2720,36 +2722,35 @@ mod tests { generated_assets.collections.iter().for_each(|collection| { collection_dynamic_details.insert( collection.collection.value, - AssetDynamicDetails { + AssetCompleteDetails { pubkey: collection.collection.value, - url: Updated::new( - 100, - Some(UpdateVersion::Sequence(100)), - "http://example.com".to_string(), - ), - onchain_data: Some(Updated::new( - 100, - Some(UpdateVersion::Sequence(100)), - "{ + dynamic_details: Some(AssetDynamicDetails { + pubkey: collection.collection.value, + url: Updated::new( + 100, + Some(UpdateVersion::Sequence(100)), + "http://example.com".to_string(), + ), + onchain_data: Some(Updated::new( + 100, + Some(UpdateVersion::Sequence(100)), + "{ \"name\": \"WIF Drop\", \"symbol\": \"6WIF\"\ }" - .to_string(), - )), + .to_string(), + )), + ..Default::default() + }), ..Default::default() }, ); }); - let (d, o) = tokio::join!( - env.rocks_env - .storage - .asset_dynamic_data - .put_batch(collection_dynamic_details), - env.rocks_env.storage.asset_offchain_data.put_async( - "http://example.com".to_string(), - OffChainData { - url: 
"http://example.com".to_string(), - metadata: "{ + let o = env.rocks_env.storage.asset_offchain_data.put_async( + "http://example.com".to_string(), + OffChainData { + url: "http://example.com".to_string(), + metadata: "{ \"name\": \"WIF Drop\", \"symbol\": \"6WIF\", \"description\": \"Random Drop event! https://3000wif.com\", @@ -2783,12 +2784,14 @@ mod tests { ] } }" - .to_string(), - } - ) + .to_string(), + }, ); - d.unwrap(); - o.unwrap(); + env.rocks_env + .storage + .put_complete_asset_details_batch(collection_dynamic_details) + .unwrap(); + o.await.unwrap(); let api = nft_ingester::api::api_impl::DasApi::< MaybeProofChecker, @@ -3084,6 +3087,39 @@ mod tests { amount: 30000, write_version: 10, }; + + let ftm_complete = AssetCompleteDetails { + pubkey: fungible_token_mint2, + static_details: Some(AssetStaticDetails { + pubkey: fungible_token_mint2, + specification_asset_class: SpecificationAssetClass::FungibleAsset, + royalty_target_type: RoyaltyTargetType::Single, + created_at: 10, + edition_address: None, + }), + owner: Some(AssetOwner { + pubkey: fungible_token_mint2, + owner: Updated::new(10, Some(UpdateVersion::WriteVersion(10)), None), + delegate: Default::default(), + owner_type: Default::default(), + owner_delegate_seq: Default::default(), + }), + ..Default::default() + }; + + env.rocks_env + .storage + .db + .put_cf( + &env.rocks_env + .storage + .db + .cf_handle(AssetCompleteDetails::NAME) + .unwrap(), + fungible_token_mint2, + ftm_complete.convert_to_fb_bytes(), + ) + .unwrap(); let mut batch_storage = BatchSaveStorage::new( env.rocks_env.storage.clone(), 10, @@ -3112,35 +3148,6 @@ mod tests { .transform_and_save_mint_account(&mut batch_storage, &mint2) .unwrap(); batch_storage.flush().unwrap(); - env.rocks_env - .storage - .asset_static_data - .put( - fungible_token_mint2, - AssetStaticDetails { - pubkey: fungible_token_mint2, - specification_asset_class: SpecificationAssetClass::FungibleAsset, - royalty_target_type: RoyaltyTargetType::Single, - 
created_at: 10, - edition_address: None, - }, - ) - .unwrap(); - env.rocks_env - .storage - .asset_owner_data - .put( - fungible_token_mint2, - AssetOwner { - pubkey: fungible_token_mint2, - owner: Updated::new(10, Some(UpdateVersion::WriteVersion(10)), None), - delegate: Default::default(), - owner_type: Default::default(), - owner_delegate_seq: Default::default(), - }, - ) - .unwrap(); - let (_, rx) = tokio::sync::broadcast::channel::<()>(1); let synchronizer = Arc::new(synchronizer); diff --git a/nft_ingester/tests/gapfiller_tests.rs b/nft_ingester/tests/gapfiller_tests.rs index d765dd89e..dd677584c 100644 --- a/nft_ingester/tests/gapfiller_tests.rs +++ b/nft_ingester/tests/gapfiller_tests.rs @@ -5,7 +5,8 @@ use interface::asset_streaming_and_discovery::{ }; use metrics_utils::red::RequestErrorDurationMetrics; use nft_ingester::gapfiller::{process_asset_details_stream, process_raw_blocks_stream}; -use rocks_db::migrator::MigrationState; +use rocks_db::asset_generated::asset as fb; +use rocks_db::{asset::AssetCompleteDetails, column::TypedColumn, migrator::MigrationState}; use solana_sdk::pubkey::Pubkey; use solana_transaction_status::UiConfirmedBlock; use std::sync::Arc; @@ -53,20 +54,35 @@ async fn test_process_asset_details_stream() { ]))) }); process_asset_details_stream(rx, storage.clone(), 100, 200, mock).await; - let selected_data = storage - .asset_dynamic_data - .get(first_key.clone()) + .db + .get_pinned_cf( + &storage.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + first_key.clone(), + ) .unwrap() .unwrap(); - assert_eq!(selected_data.supply, Some(Updated::new(1, None, 10))); + let selected_data = fb::root_as_asset_complete_details(&selected_data).unwrap(); + let selected_data = AssetCompleteDetails::from(selected_data); + assert_eq!( + selected_data.dynamic_details.unwrap().supply, + Some(Updated::new(1, None, 10)) + ); let selected_data = storage - .asset_dynamic_data - .get(second_key.clone()) + .db + .get_pinned_cf( + 
&storage.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + second_key.clone(), + ) .unwrap() .unwrap(); - assert_eq!(selected_data.supply, Some(Updated::new(1, None, 10))); + let selected_data = fb::root_as_asset_complete_details(&selected_data).unwrap(); + let selected_data = AssetCompleteDetails::from(selected_data); + assert_eq!( + selected_data.dynamic_details.unwrap().supply, + Some(Updated::new(1, None, 10)) + ); } #[tokio::test] diff --git a/nft_ingester/tests/process_accounts.rs b/nft_ingester/tests/process_accounts.rs index 9604be526..c3fa07589 100644 --- a/nft_ingester/tests/process_accounts.rs +++ b/nft_ingester/tests/process_accounts.rs @@ -27,7 +27,9 @@ mod tests { use nft_ingester::processors::account_based::mpl_core_processor::MplCoreProcessor; use nft_ingester::processors::account_based::mplx_updates_processor::MplxAccountsProcessor; use nft_ingester::processors::account_based::token_updates_processor::TokenAccountsProcessor; + use rocks_db::asset::AssetCompleteDetails; use rocks_db::batch_savers::BatchSaveStorage; + use rocks_db::column::TypedColumn; use rocks_db::AssetAuthority; use solana_program::pubkey::Pubkey; use std::collections::HashMap; @@ -153,37 +155,26 @@ mod tests { .unwrap(); batch_storage.flush().unwrap(); - let first_owner_from_db = env + let first_from_db = env .rocks_env .storage - .asset_owner_data - .get(first_mint) + .get_complete_asset_details(first_mint) .unwrap() .unwrap(); - let second_owner_from_db = env + let second_from_db = env .rocks_env .storage - .asset_owner_data - .get(second_mint) + .get_complete_asset_details(second_mint) .unwrap() .unwrap(); + + let first_owner_from_db = first_from_db.owner.unwrap(); + let second_owner_from_db = second_from_db.owner.unwrap(); assert_eq!(first_owner_from_db.owner.value.unwrap(), first_owner); assert_eq!(second_owner_from_db.owner.value.unwrap(), second_owner); - let first_dynamic_from_db = env - .rocks_env - .storage - .asset_dynamic_data - .get(first_mint) - .unwrap() - 
.unwrap(); - let second_dynamic_from_db = env - .rocks_env - .storage - .asset_dynamic_data - .get(second_mint) - .unwrap() - .unwrap(); + let first_dynamic_from_db = first_from_db.dynamic_details.unwrap(); + let second_dynamic_from_db = second_from_db.dynamic_details.unwrap(); assert_eq!(first_dynamic_from_db.supply.unwrap().value, 1); assert_eq!(second_dynamic_from_db.supply.unwrap().value, 1); } @@ -267,15 +258,13 @@ mod tests { let first_static_from_db = env .rocks_env .storage - .asset_static_data - .get(first_mint) + .get_complete_asset_details(first_mint) .unwrap() .unwrap(); let second_static_from_db = env .rocks_env .storage - .asset_static_data - .get(second_mint) + .get_complete_asset_details(second_mint) .unwrap() .unwrap(); assert_eq!(first_static_from_db.pubkey, first_mint); @@ -409,17 +398,27 @@ mod tests { let cli = Cli::default(); let (env, _generated_assets) = setup::TestEnvironment::create(&cli, cnt, 100).await; let mpl_core_parser = MplCoreProcessor::new(Arc::new(IngesterMetricsConfig::new())); + let second_authority_complete_asset = AssetCompleteDetails { + pubkey: second_authority, + authority: Some(AssetAuthority { + pubkey: Default::default(), + authority: second_owner, + slot_updated: 0, + write_version: None, + }), + ..Default::default() + }; env.rocks_env .storage - .asset_authority_data - .put( + .db + .merge_cf( + &env.rocks_env + .storage + .db + .cf_handle(AssetCompleteDetails::NAME) + .unwrap(), second_authority, - AssetAuthority { - pubkey: Default::default(), - authority: second_owner, - slot_updated: 0, - write_version: None, - }, + second_authority_complete_asset.convert_to_fb_bytes(), ) .unwrap(); @@ -444,27 +443,15 @@ mod tests { .unwrap(); batch_storage.flush().unwrap(); - let first_dynamic_from_db = env + let first = env .rocks_env .storage - .asset_dynamic_data - .get(first_mpl_core) - .unwrap() - .unwrap(); - let first_owner_from_db = env - .rocks_env - .storage - .asset_owner_data - .get(first_mpl_core) - .unwrap() - 
.unwrap(); - let first_authority_from_db = env - .rocks_env - .storage - .asset_authority_data - .get(first_mpl_core) + .get_complete_asset_details(first_mpl_core) .unwrap() .unwrap(); + let first_dynamic_from_db = first.dynamic_details.unwrap(); + let first_owner_from_db = first.owner.unwrap(); + let first_authority_from_db = first.authority.unwrap(); assert_eq!(first_dynamic_from_db.pubkey, first_mpl_core); assert_eq!(first_dynamic_from_db.is_frozen.value, true); assert_eq!(first_dynamic_from_db.url.value, first_uri.to_string()); @@ -475,27 +462,16 @@ mod tests { assert_eq!(first_owner_from_db.owner.value.unwrap(), first_owner); assert_eq!(first_authority_from_db.authority, first_authority); - let second_dynamic_from_db = env - .rocks_env - .storage - .asset_dynamic_data - .get(second_mpl_core) - .unwrap() - .unwrap(); - let second_owner_from_db = env + let second = env .rocks_env .storage - .asset_owner_data - .get(second_mpl_core) - .unwrap() - .unwrap(); - let second_authority_from_db = env - .rocks_env - .storage - .asset_authority_data - .get(second_mpl_core) + .get_complete_asset_details(second_mpl_core) .unwrap() .unwrap(); + let second_dynamic_from_db = second.dynamic_details.unwrap(); + let second_owner_from_db = second.owner.unwrap(); + let second_authority_from_db = second.authority.unwrap(); + assert_eq!(second_dynamic_from_db.pubkey, second_mpl_core); assert_eq!(second_dynamic_from_db.is_frozen.value, false); assert_eq!(second_dynamic_from_db.url.value, second_uri.to_string()); diff --git a/postgre-client/src/asset_filter_client.rs b/postgre-client/src/asset_filter_client.rs index 9c1a3f6e6..d129c87c3 100644 --- a/postgre-client/src/asset_filter_client.rs +++ b/postgre-client/src/asset_filter_client.rs @@ -12,7 +12,7 @@ use crate::{ error::IndexDbError, model::{ AssetSortBy, AssetSortDirection, AssetSortedIndex, AssetSorting, AssetSupply, - SearchAssetsFilter, + SearchAssetsFilter, SpecificationAssetClass, }, 
storage_traits::AssetPubkeyFilteredFetcher, PgClient, COUNT_ACTION, SELECT_ACTION, SQL_COMPONENT, @@ -136,12 +136,10 @@ impl PgClient { filter: &'a SearchAssetsFilter, options: &'a GetByMethodsOptions, ) -> Result, IndexDbError> { - let mut query_builder = QueryBuilder::new("SELECT count(*) FROM assets_v3 "); - let group_clause_required = add_filter_clause(&mut query_builder, filter, options); - // Add GROUP BY clause if necessary - if group_clause_required { - query_builder.push(" GROUP BY assets_v3.ast_pubkey, assets_v3.ast_slot_created, assets_v3.ast_slot_updated "); - } + let mut query_builder = QueryBuilder::new( + "SELECT COUNT(DISTINCT (assets_v3.ast_pubkey)) AS total_groups FROM assets_v3 ", + ); + add_filter_clause(&mut query_builder, filter, options); query_builder.push(";"); Ok(query_builder) @@ -149,7 +147,7 @@ impl PgClient { } fn add_filter_clause<'a>( - query_builder: &mut QueryBuilder<'a, Postgres>, + mut query_builder: &mut QueryBuilder<'a, Postgres>, filter: &'a SearchAssetsFilter, options: &'a GetByMethodsOptions, ) -> bool { @@ -197,7 +195,43 @@ fn add_filter_clause<'a>( query_builder.push(" AND assets_v3.ast_specification_asset_class = "); query_builder.push_bind(asset_class); } - + if let Some(ref token_type) = filter.token_type { + match token_type { + TokenType::Fungible => { + let classes = vec![ + SpecificationAssetClass::FungibleToken, + SpecificationAssetClass::FungibleAsset, + ]; + push_asset_class_filter(&mut query_builder, &classes, None); + } + TokenType::NonFungible => { + let classes = vec![ + SpecificationAssetClass::MplCoreAsset, + SpecificationAssetClass::MplCoreCollection, + SpecificationAssetClass::Nft, + SpecificationAssetClass::ProgrammableNft, + ]; + push_asset_class_filter(&mut query_builder, &classes, None); + } + TokenType::RegularNFT => { + let classes = vec![ + SpecificationAssetClass::MplCoreAsset, + SpecificationAssetClass::MplCoreCollection, + SpecificationAssetClass::Nft, + SpecificationAssetClass::ProgrammableNft, 
+ ]; + push_asset_class_filter(&mut query_builder, &classes, Some(false)); + } + TokenType::CompressedNFT => { + let classes = vec![ + SpecificationAssetClass::Nft, + SpecificationAssetClass::ProgrammableNft, + ]; + push_asset_class_filter(&mut query_builder, &classes, Some(true)); + } + TokenType::All => {} + } + } if let Some(owner_address) = &filter.owner_address { if let Some(ref token_type) = filter.token_type { match *token_type { @@ -338,6 +372,26 @@ fn add_filter_clause<'a>( group_clause_required } +fn push_asset_class_filter( + query_builder: &mut QueryBuilder, + classes: &[SpecificationAssetClass], + compressed: Option, +) { + if !classes.is_empty() { + query_builder.push(" AND assets_v3.ast_specification_asset_class IN ("); + let mut qb = query_builder.separated(", "); + for cl in classes.iter() { + qb.push_bind(*cl); + } + query_builder.push(") "); + } + if let Some(is_compressed) = compressed { + query_builder.push(" AND assets_v3.ast_is_compressed = "); + query_builder.push_bind(is_compressed); + query_builder.push(" "); + } +} + fn add_slot_and_key_comparison( key: &str, comparison: &str, diff --git a/postgre-client/src/asset_index_client.rs b/postgre-client/src/asset_index_client.rs index 9473d71c4..83ff5ffc2 100644 --- a/postgre-client/src/asset_index_client.rs +++ b/postgre-client/src/asset_index_client.rs @@ -6,7 +6,8 @@ use std::{ use async_trait::async_trait; use solana_sdk::pubkey::Pubkey; -use sqlx::{Executor, Postgres, QueryBuilder, Transaction}; +use sqlx::{Connection, Executor, Postgres, QueryBuilder, Transaction}; +use std::panic::{catch_unwind, AssertUnwindSafe}; use crate::{ error::IndexDbError, @@ -17,6 +18,7 @@ use crate::{ SQL_COMPONENT, TRANSACTION_ACTION, UPDATE_ACTION, }; use entities::{ + enums::SpecificationAssetClass as AssetSpecClass, enums::AssetType, models::{AssetIndex, Creator, FungibleAssetIndex, FungibleToken, UrlWithStatus}, }; @@ -206,6 +208,25 @@ pub(crate) fn split_assets_into_components(asset_indexes: &[AssetIndex]) 
-> Asse let mut asset_indexes = asset_indexes.to_vec(); asset_indexes.sort_by(|a, b| a.pubkey.cmp(&b.pubkey)); + let fungible_tokens = asset_indexes + .iter() + .filter(|asset| asset.specification_asset_class == AssetSpecClass::FungibleToken) + .map(|asset| { + FungibleToken { + key: asset.pubkey, + slot_updated: asset.slot_updated, + // it's unlikely that rows below will not be filled for fungible token + // but even if that happens we will save asset with default values + owner: asset.owner.unwrap_or_default(), + asset: asset.fungible_asset_mint.unwrap_or_default(), + balance: asset.fungible_asset_balance.unwrap_or_default() as i64, + } + }) + .collect::>(); + let asset_indexes = asset_indexes + .into_iter() + .filter(|asset| asset.fungible_asset_mint.is_none()) + .collect::>(); // Collect all creators from all assets let mut all_creators: Vec<(Pubkey, Creator, i64)> = asset_indexes @@ -371,93 +392,97 @@ impl AssetIndexStorage for PgClient { ) -> Result<(), IndexDbError> { let operation_start_time = chrono::Utc::now(); let mut transaction = self.start_transaction().await?; - let dump_result = match asset_type { - AssetType::NonFungible => { - let Some(metadata_path) = - base_path.join("metadata.csv").to_str().map(str::to_owned) - else { - return Err(IndexDbError::BadArgument(format!( - "invalid path '{:?}'", - base_path - ))); - }; - let Some(creators_path) = - base_path.join("creators.csv").to_str().map(str::to_owned) - else { - return Err(IndexDbError::BadArgument(format!( - "invalid path '{:?}'", - base_path - ))); - }; - let Some(assets_authorities_path) = base_path - .join("assets_authorities.csv") - .to_str() - .map(str::to_owned) - else { - return Err(IndexDbError::BadArgument(format!( - "invalid path '{:?}'", - base_path - ))); - }; - let Some(assets_path) = base_path.join("assets.csv").to_str().map(str::to_owned) - else { - return Err(IndexDbError::BadArgument(format!( - "invalid path '{:?}'", - base_path - ))); - }; - - let result_of_copy = self - 
.copy_nfts( - metadata_path, - creators_path, - assets_path, - assets_authorities_path, - &mut transaction, - ) - .await; - - if result_of_copy.is_ok() { - self.update_last_synced_key( - last_key, - &mut transaction, - "last_synced_key", - asset_type, - ) - .await - } else { - result_of_copy + let dump_result = (async { + match asset_type { + AssetType::NonFungible => { + let Some(metadata_path) = + base_path.join("metadata.csv").to_str().map(str::to_owned) + else { + return Err(IndexDbError::BadArgument(format!( + "invalid path '{:?}'", + base_path + ))); + }; + let Some(creators_path) = + base_path.join("creators.csv").to_str().map(str::to_owned) + else { + return Err(IndexDbError::BadArgument(format!( + "invalid path '{:?}'", + base_path + ))); + }; + let Some(assets_authorities_path) = base_path + .join("assets_authorities.csv") + .to_str() + .map(str::to_owned) + else { + return Err(IndexDbError::BadArgument(format!( + "invalid path '{:?}'", + base_path + ))); + }; + let Some(assets_path) = + base_path.join("assets.csv").to_str().map(str::to_owned) + else { + return Err(IndexDbError::BadArgument(format!( + "invalid path '{:?}'", + base_path + ))); + }; + + let result_of_copy = self + .copy_nfts( + metadata_path, + creators_path, + assets_path, + assets_authorities_path, + &mut transaction, + ) + .await; + + if result_of_copy.is_ok() { + self.update_last_synced_key( + last_key, + &mut transaction, + "last_synced_key", + asset_type, + ) + .await + } else { + result_of_copy + } } - } - AssetType::Fungible => { - let Some(fungible_tokens_path) = base_path - .join("fungible_tokens.csv") - .to_str() - .map(str::to_owned) - else { - return Err(IndexDbError::BadArgument(format!( - "invalid path '{:?}'", - base_path - ))); - }; - - let result_of_copy = self - .copy_fungibles(fungible_tokens_path, &mut transaction) - .await; - - if result_of_copy.is_ok() { - self.update_last_synced_key( - last_key, - &mut transaction, - "last_synced_key", - asset_type, - ) - .await - } 
else { - result_of_copy + AssetType::Fungible => { + let Some(fungible_tokens_path) = base_path + .join("fungible_tokens.csv") + .to_str() + .map(str::to_owned) + else { + return Err(IndexDbError::BadArgument(format!( + "invalid path '{:?}'", + base_path + ))); + }; + + let result_of_copy = self + .copy_fungibles(fungible_tokens_path, &mut transaction) + .await; + + if result_of_copy.is_ok() { + self.update_last_synced_key( + last_key, + &mut transaction, + "last_synced_key", + asset_type, + ) + .await + } else { + result_of_copy + } } } - }; + }) + .await; match dump_result { Ok(_) => { @@ -465,7 +490,7 @@ impl AssetIndexStorage for PgClient { self.metrics.observe_request( SQL_COMPONENT, TRANSACTION_ACTION, - "transaction_failed_total", + "load_from_dump", operation_start_time, ); Ok(()) @@ -475,7 +500,7 @@ impl AssetIndexStorage for PgClient { self.metrics.observe_request( SQL_COMPONENT, TRANSACTION_ACTION, - "transaction_failed_total", + "load_from_dump_failed", operation_start_time, ); Err(e) @@ -489,9 +514,19 @@ impl AssetIndexStorage for PgClient { asset_type: AssetType, ) -> Result<(), IndexDbError> { let mut transaction = self.start_transaction().await?; - self.update_last_synced_key(last_key, &mut transaction, "last_synced_key", asset_type) - .await?; - self.commit_transaction(transaction).await + match self + .update_last_synced_key(last_key, &mut transaction, "last_synced_key", asset_type) + .await + { + Ok(_) => { + self.commit_transaction(transaction).await?; + Ok(()) + } + Err(e) => { + self.rollback_transaction(transaction).await?; + Err(e) + } + } } } @@ -513,20 +548,51 @@ pub struct CreatorsUpdates { } impl PgClient { - pub async fn get_existing_metadata_keys(&self) -> Result>, String> { + pub async fn get_existing_metadata_keys(&self) -> Result>, IndexDbError> { + let operation_start_time = chrono::Utc::now(); + + // Start the transaction + let mut transaction = self.start_transaction().await?; + + // Call the transactional logic method + let 
result = self + .get_existing_metadata_keys_logic(&mut transaction) + .await; + // Roll back the transaction (since we only used it for the cursor) + self.rollback_transaction(transaction).await?; + self.metrics.observe_request( + SQL_COMPONENT, + TRANSACTION_ACTION, + "transaction_total", + operation_start_time, + ); + result + } + + // The transactional logic is encapsulated here + async fn get_existing_metadata_keys_logic( + &self, + transaction: &mut Transaction<'_, Postgres>, + ) -> Result>, IndexDbError> { let mut set = HashSet::new(); - let mut tx = self.pool.begin().await.map_err(|e| e.to_string())?; + + // Declare the cursor let mut query_builder: QueryBuilder<'_, Postgres> = QueryBuilder::new( "DECLARE all_tasks CURSOR FOR SELECT tsk_id FROM tasks WHERE tsk_id IS NOT NULL", ); - self.execute_query_with_metrics(&mut tx, &mut query_builder, CREATE_ACTION, "cursor") + self.execute_query_with_metrics(transaction, &mut query_builder, CREATE_ACTION, "cursor") .await?; + + // Fetch rows in a loop loop { let mut query_builder: QueryBuilder<'_, Postgres> = QueryBuilder::new("FETCH 10000 FROM all_tasks"); - // Fetch a batch of rows from the cursor let query = query_builder.build_query_as::(); - let rows = query.fetch_all(&mut tx).await.map_err(|e| e.to_string())?; + let rows = query.fetch_all(&mut *transaction).await.map_err(|e| { + self.metrics + .observe_error(SQL_COMPONENT, SELECT_ACTION, "FETCH_CURSOR"); + IndexDbError::QueryExecErr(e) + })?; // If no rows were fetched, we are done if rows.is_empty() { @@ -537,10 +603,12 @@ impl PgClient { set.insert(row.tsk_id); } } + + // Close the cursor let mut query_builder: QueryBuilder<'_, Postgres> = QueryBuilder::new("CLOSE all_tasks"); - self.execute_query_with_metrics(&mut tx, &mut query_builder, DROP_ACTION, "cursor") + self.execute_query_with_metrics(transaction, &mut query_builder, DROP_ACTION, "cursor") .await?; - self.rollback_transaction(tx).await?; + Ok(set) } diff --git a/postgre-client/src/lib.rs 
b/postgre-client/src/lib.rs index e621170cc..bf14b55f3 100644 --- a/postgre-client/src/lib.rs +++ b/postgre-client/src/lib.rs @@ -10,6 +10,7 @@ use sqlx::{ }; use std::{sync::Arc, time::Duration}; use tracing::log::LevelFilter; +use sqlx::Executor; pub mod asset_filter_client; pub mod asset_index_client; @@ -187,4 +188,35 @@ impl PgClient { .observe_request(SQL_COMPONENT, TRANSACTION_ACTION, "rollback", start_time); Ok(()) } + + #[cfg(feature = "integration_tests")] + pub async fn clean_db(&self) -> Result<(), IndexDbError> { + let mut transaction = self.pool.begin().await?; + + self.drop_fungible_indexes(&mut transaction).await?; + self.drop_nft_indexes(&mut transaction).await?; + self.drop_constraints(&mut transaction).await?; + for table in [ + "assets_v3", + "assets_authorities", + "asset_creators_v3", + "batch_mints", + "core_fees", + "fungible_tokens", + ] { + self.truncate_table(&mut transaction, table).await?; + } + + transaction.execute(sqlx::query("update last_synced_key set last_synced_asset_update_key = null where id = 1;")).await?; + + self.recreate_fungible_indexes(&mut transaction).await?; + self.recreate_nft_indexes(&mut transaction).await?; + self.recreate_constraints(&mut transaction).await?; + + transaction.commit().await.map_err(|e| e)?; + // those await above will not always rollback the tx + // take this into account if we use this function somewhere else except the tests + + Ok(()) + } } diff --git a/postgre-client/src/load_client.rs b/postgre-client/src/load_client.rs index db05d3571..bf8d77db7 100644 --- a/postgre-client/src/load_client.rs +++ b/postgre-client/src/load_client.rs @@ -37,7 +37,7 @@ impl PgClient { Ok(()) } - async fn truncate_table( + pub(crate) async fn truncate_table( &self, transaction: &mut Transaction<'_, Postgres>, table: &str, diff --git a/postgre-client/src/model.rs b/postgre-client/src/model.rs index dcbbb89b6..aad8a3762 100644 --- a/postgre-client/src/model.rs +++ b/postgre-client/src/model.rs @@ -19,11 +19,16 @@ 
pub enum SpecificationAssetClass { FungibleToken, FungibleAsset, Nft, + // legacy code, never ever existed in the first place PrintableNft, ProgrammableNft, + // legacy code, never ever existed in the first place Print, + // legacy code, never ever existed in the first place TransferRestrictedNft, + // legacy code, never ever existed in the first place NonTransferableNft, + // legacy code, never ever existed in the first place IdentityNft, MplCoreAsset, MplCoreCollection, diff --git a/postgre-client/src/temp_index_client.rs b/postgre-client/src/temp_index_client.rs index 376e8d7e1..6d81a904b 100644 --- a/postgre-client/src/temp_index_client.rs +++ b/postgre-client/src/temp_index_client.rs @@ -40,6 +40,7 @@ impl TempClient { pub async fn initialize(&self, initial_key: &[u8]) -> Result<(), String> { let mut c = self.pooled_connection.lock().await; + // todo: ensure the transactions are rolled back on error let mut tx = c.begin().await.map_err(|e| e.to_string())?; for table in [ "tasks", diff --git a/rocks-db/Cargo.toml b/rocks-db/Cargo.toml index 7a9a0a32f..4555a54ab 100644 --- a/rocks-db/Cargo.toml +++ b/rocks-db/Cargo.toml @@ -41,6 +41,8 @@ usecase = { path = "../usecase" } tempfile = { workspace = true } bubblegum-batch-sdk = { workspace = true } num-traits = { workspace = true } +# using a different version of flatbuffer compared to the rest of the project as this one is compatible with generator used for AssetCompleteDetails structures +flatbuffers = { version="24.3.25", features = ["serialize"]} indicatif = { workspace = true } [dev-dependencies] @@ -48,6 +50,7 @@ rand = { workspace = true } setup = { path = "../tests/setup" } criterion = { workspace = true } tracing-test = { workspace = true } +itertools = { workspace = true } [features] integration_tests = [] diff --git a/rocks-db/benches/dump_benchmark.rs b/rocks-db/benches/dump_benchmark.rs index a522b8761..3133cafd8 100644 --- a/rocks-db/benches/dump_benchmark.rs +++ b/rocks-db/benches/dump_benchmark.rs @@ 
-1,51 +1,204 @@ -use std::{collections::HashSet, sync::Arc}; +use std::sync::Arc; use criterion::{criterion_group, criterion_main, Criterion}; -use rocks_db::{storage_traits::Dumper, Storage}; +use entities::api_req_params::Options; +use metrics_utils::SynchronizerMetricsConfig; +use rocks_db::{ + storage_traits::{AssetIndexReader, Dumper}, + Storage, +}; +use solana_sdk::pubkey::Pubkey; use tempfile::TempDir; +async fn bench_batch_get_keys(storage: Arc, pubkeys: Vec) { + storage.asset_dynamic_data.batch_get(pubkeys).await.unwrap(); + // storage.asset_data.batch_get(pubkeys) + // .await + // .unwrap(); +} + +// async fn simple_iterate(storage: Arc) { +// for k in storage.asset_data.iter_start() { +// let _ = k; +// } +// } + +// async fn deserialized_iterate(storage: Arc) { +// for k in storage +// .asset_data +// .pairs_iterator(storage.asset_data.iter_start()) +// { +// let (_k, _v) = k; +// } +// } + +// async fn deserialized_only_value_iterate(storage: Arc) { +// for k in storage +// .asset_data +// .values_iterator(storage.asset_data.iter_start()) +// { +// let _ = k; +// } +// } + +// async fn collect_simple(storage: Arc) { +// let core_collections: HashMap = storage +// .asset_data +// .values_iterator(storage.asset_data.iter_start()) +// .filter_map(|a| { +// a.static_details +// .filter(|sd| { +// sd.specification_asset_class == SpecificationAssetClass::MplCoreCollection +// }) +// .map(|_| a.collection) +// .flatten() +// }) +// .filter_map(|a| a.authority.value.map(|v| (a.pubkey, v))) +// .collect(); +// } + +// async fn collect_plain(storage: Arc) { +// let core_collections: HashMap = storage +// .asset_data +// .values_iterator(storage.asset_data.iter_start()) +// .filter(|a|a.static_details.as_ref().is_some_and(|sd| +// sd.specification_asset_class == SpecificationAssetClass::MplCoreCollection) && a.collection.as_ref().is_some_and(|c| c.authority.value.is_some())) +// .map(|a| (a.pubkey, a.collection.unwrap().authority.value.unwrap())) +// .collect(); 
+// } +async fn bench_get_assets(storage: Arc, pubkeys: Vec) { + storage + .get_asset_selected_maps_async( + pubkeys, + &None, + &Options { + ..Default::default() + }, + ) + .await + .unwrap(); +} + +async fn bench_get_asset_indexes(storage: Arc, pubkeys: Vec) { + storage.get_asset_indexes(&pubkeys).await.unwrap(); +} + +async fn bench_get_assets_individually(storage: Arc, pubkeys: Vec) { + for pubkey in pubkeys { + storage + .get_asset_selected_maps_async(vec![pubkey], &None, &Options::default()) + .await + .unwrap(); + } +} + async fn bench_dump(storage: Arc, batch_size: usize) { let (_tx, rx) = tokio::sync::broadcast::channel::<()>(1); let temp_dir = TempDir::new().expect("Failed to create a temporary directory"); let temp_dir_path = temp_dir.path(); + let sync_metrics = Arc::new(SynchronizerMetricsConfig::new()); storage - .dump_db(temp_dir_path, batch_size, &rx) + .dump_db(temp_dir_path, batch_size, &rx, sync_metrics) .await .unwrap(); } +#[tokio::test] +#[tracing_test::traced_test] +async fn test_dump() { + println!("Starting the test"); + info!("Starting the test"); + let env = setup::rocks::RocksTestEnvironment::new(&[]); + let cnt = 1_000; + println!("env created, generating assets"); + env.generate_assets(cnt, 25).await; + println!("assets generated"); + let (_tx, rx) = tokio::sync::broadcast::channel::<()>(1); + let temp_dir = TempDir::new().expect("Failed to create a temporary directory"); + let temp_dir_path = temp_dir.path(); + let sync_metrics = Arc::new(SynchronizerMetricsConfig::new()); + env.storage + .dump_db(temp_dir_path, batch_size, &rx, sync_metrics) + .await + .expect("Failed to dump the database"); + println!("dump complete"); +} +fn noop_benchmark(_c: &mut Criterion) {} + fn dump_benchmark(c: &mut Criterion) { let env = setup::rocks::RocksTestEnvironment::new(&[]); let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { + let assets = rt.block_on(async { let cnt = 1_000_000; - _ = env.generate_assets(cnt, 25).await + 
env.generate_assets(cnt, 25).await }); + let sampled_pubkeys: Vec = (99..assets.pubkeys.len()) + .step_by(100) + .take(1000) + .map(|i| assets.pubkeys[i].clone()) + .collect(); let rt = tokio::runtime::Runtime::new().unwrap(); let mut group = c.benchmark_group("Dumping Group"); group.sample_size(10); - group.measurement_time(std::time::Duration::from_secs(60)); + // group.measurement_time(std::time::Duration::from_secs(60)); let storage = env.storage; - group.bench_function("500 batch size", |b| { - b.iter(|| rt.block_on(bench_dump(storage.clone(), 500))) + group.bench_function("get_assets", |b| { + b.iter(|| rt.block_on(bench_get_assets(storage.clone(), sampled_pubkeys.clone()))) + }); + group.bench_function("get_assets_individually", |b| { + b.iter(|| { + rt.block_on(bench_get_assets_individually( + storage.clone(), + sampled_pubkeys.clone(), + )) + }) }); - group.bench_function("1k batch size", |b| { - b.iter(|| rt.block_on(bench_dump(storage.clone(), 1_000))) + group.bench_function("get_asset_indexes", |b| { + b.iter(|| { + rt.block_on(bench_get_asset_indexes( + storage.clone(), + sampled_pubkeys.clone(), + )) + }) }); + // group.bench_function("batch_get_keys", |b| { + // b.iter(|| rt.block_on(bench_batch_get_keys(storage.clone(), sampled_pubkeys.clone()))) + // }); + // group.bench_function("collect_simple", |b| { + // b.iter(|| rt.block_on(collect_simple(storage.clone()))) + // }); + // group.bench_function("collect_plain", |b| { + // b.iter(|| rt.block_on(collect_plain(storage.clone()))) + // }); + // group.bench_function("simple_iterate", |b| { + // b.iter(|| rt.block_on(simple_iterate(storage.clone()))) + // }); + // group.bench_function("deserialized_iterate", |b| { + // b.iter(|| rt.block_on(deserialized_iterate(storage.clone()))) + // }); + // group.bench_function("deserialized_only_value_iterate", |b| { + // b.iter(|| rt.block_on(deserialized_only_value_iterate(storage.clone()))) + // }); + // group.bench_function("500 batch size", |b| { + // 
b.iter(|| rt.block_on(bench_dump(storage.clone(), 500))) + // }); + // group.bench_function("1k batch size", |b| { + // b.iter(|| rt.block_on(bench_dump(storage.clone(), 1_000))) + // }); group.bench_function("2k batch size", |b| { b.iter(|| rt.block_on(bench_dump(storage.clone(), 2_000))) }); - group.bench_function("5k batch size", |b| { - b.iter(|| rt.block_on(bench_dump(storage.clone(), 5_000))) - }); - group.bench_function("10k batch size", |b| { - b.iter(|| rt.block_on(bench_dump(storage.clone(), 10_000))) - }); - group.bench_function("20k batch size", |b| { - b.iter(|| rt.block_on(bench_dump(storage.clone(), 20_000))) - }); + // group.bench_function("5k batch size", |b| { + // b.iter(|| rt.block_on(bench_dump(storage.clone(), 5_000))) + // }); + // group.bench_function("10k batch size", |b| { + // b.iter(|| rt.block_on(bench_dump(storage.clone(), 10_000))) + // }); + // group.bench_function("20k batch size", |b| { + // b.iter(|| rt.block_on(bench_dump(storage.clone(), 20_000))) + // }); } criterion_group!(benches, dump_benchmark); diff --git a/rocks-db/benches/misc_benchmark.rs b/rocks-db/benches/misc_benchmark.rs index 280357ac3..841749cd4 100644 --- a/rocks-db/benches/misc_benchmark.rs +++ b/rocks-db/benches/misc_benchmark.rs @@ -1,6 +1,12 @@ +use rocks_db::asset_generated::asset as fb; + use bincode::{deserialize, serialize}; use criterion::{criterion_group, criterion_main, Criterion}; -use rocks_db::AssetDynamicDetails; +use rocks_db::{ + asset::{self, AssetCompleteDetails}, + AssetDynamicDetails, +}; +use setup::rocks::RocksTestEnvironmentSetup; use solana_sdk::pubkey::Pubkey; fn bincode_decode_benchmark(c: &mut Criterion) { @@ -22,6 +28,183 @@ fn bincode_decode_benchmark(c: &mut Criterion) { }) }); } +fn flatbuffer_vs_bincode_merge_functions_benchmark(c: &mut Criterion) { + let cnt = 1; + let updates_number = 1000; + let slot = 100; + let pubkeys = (0..cnt).map(|_| Pubkey::new_unique()).collect::>(); + let static_details = 
RocksTestEnvironmentSetup::static_data_for_nft(&pubkeys, slot); + let authorities = (0..updates_number) + .map(|_| RocksTestEnvironmentSetup::with_authority(&pubkeys)) + .collect::>(); + let owners = (0..updates_number) + .map(|_| RocksTestEnvironmentSetup::test_owner(&pubkeys)) + .collect::>(); + let dynamic_details = (0..updates_number) + .map(|_| RocksTestEnvironmentSetup::dynamic_data(&pubkeys, slot)) + .collect::>(); + let collections = (0..updates_number) + .map(|_| RocksTestEnvironmentSetup::collection_without_authority(&pubkeys)) + .collect::>(); + + let assets_versions = (0..updates_number) + .map(|i| AssetCompleteDetails { + pubkey: *pubkeys.get(0).unwrap(), + static_details: static_details.get(0).map(|v| v.to_owned()), + dynamic_details: dynamic_details + .get(i) + .and_then(|d| d.get(0)) + .map(|v| v.to_owned()), + authority: authorities + .get(i) + .and_then(|d| d.get(0)) + .map(|v| v.to_owned()), + owner: owners.get(i).and_then(|d| d.get(0)).map(|v| v.to_owned()), + collection: collections + .get(i) + .and_then(|d| d.get(0)) + .map(|v| v.to_owned()), + }) + .collect::>(); + let bincode_bytes_versions = assets_versions + .iter() + .map(|a| serialize(&a).unwrap()) + .collect::>(); + let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(2500); + let fb_bytes_versions = assets_versions + .iter() + .map(|asset| { + builder.reset(); + let asset_fb = asset.convert_to_fb(&mut builder); + builder.finish_minimal(asset_fb); + builder.finished_data().to_vec() + }) + .collect::>(); + let key = rand::random::<[u8; 32]>(); + + c.bench_function("merge with a bincode object", |b| { + b.iter(|| { + let mut existing_val: Option> = None; + bincode_bytes_versions.iter().for_each(|bincode_bytes| { + let new_val = AssetCompleteDetails::merge_complete_details_raw( + &key, + existing_val.as_ref().map(|v| v.as_slice()), + vec![bincode_bytes.as_slice()].into_iter(), + ); + existing_val = Some(new_val.expect("should merge")) + }); + }) + }); + c.bench_function("merge 
with a flatbuffer object", |b| { + b.iter(|| { + let mut existing_val: Option> = None; + fb_bytes_versions.iter().for_each(|fb_bytes| { + let new_val = asset::merge_complete_details_fb_raw( + &key, + existing_val.as_ref().map(|v| v.as_slice()), + vec![fb_bytes.as_slice()].into_iter(), + ); + existing_val = Some(new_val.expect("should merge")) + }); + }) + }); + c.bench_function("merge with a flatbuffer object via simple objects", |b| { + b.iter(|| { + let mut existing_val: Option> = None; + fb_bytes_versions.iter().for_each(|fb_bytes| { + let new_val = asset::merge_complete_details_fb_through_proxy( + &key, + existing_val.as_ref().map(|v| v.as_slice()), + vec![fb_bytes.as_slice()].into_iter(), + ); + existing_val = Some(new_val.expect("should merge")) + }); + }) + }); + c.bench_function("merge with a flatbuffer object via a simpler merger", |b| { + b.iter(|| { + let mut existing_val: Option> = None; + fb_bytes_versions.iter().for_each(|fb_bytes| { + let new_val = asset::merge_complete_details_fb_simple_raw( + &key, + existing_val.as_ref().map(|v| v.as_slice()), + vec![fb_bytes.as_slice()].into_iter(), + ); + existing_val = Some(new_val.expect("should merge")) + }); + }) + }); + c.bench_function("deserialize a flatbuffer objects ", |b| { + b.iter(|| { + fb_bytes_versions.iter().for_each(|fb_bytes| { + fb::root_as_asset_complete_details(fb_bytes).expect("should deserialize"); + }); + }) + }); + c.bench_function("serialize using a builder with conversion", |b| { + b.iter(|| { + let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(2500); + assets_versions.iter().for_each(|asset| { + builder.reset(); + let asset_fb = asset.convert_to_fb(&mut builder); + builder.finish_minimal(asset_fb); + builder.finished_data().to_vec(); + }); + }) + }); +} + +// fn cbor_vs_bincode_decode_benchmark(c: &mut Criterion) { +// let cnt = 1_000; +// let slot = 100; +// let pubkeys = (0..cnt).map(|_| Pubkey::new_unique()).collect::>(); +// let static_details = 
RocksTestEnvironmentSetup::static_data_for_nft(&pubkeys, slot); +// let authorities = RocksTestEnvironmentSetup::with_authority(&pubkeys); +// let owners = RocksTestEnvironmentSetup::test_owner(&pubkeys); +// let dynamic_details: Vec = RocksTestEnvironmentSetup::dynamic_data(&pubkeys, slot); +// let collections = RocksTestEnvironmentSetup::collection_without_authority(&pubkeys); +// let dclone = dynamic_details.clone(); +// let assets = pubkeys.iter() +// .zip(static_details) +// .zip(authorities) +// .zip(owners) +// .zip(dynamic_details) +// .zip(collections) +// .map( +// |(((((pk, static_data), authority), owner), dynamic), collection)| AssetCompleteDetails { +// pubkey: *pk, +// static_details: Some(static_data), +// dynamic_details: Some(dynamic), +// authority: Some(authority), +// owner: Some(owner), +// collection: Some(collection), +// }, +// ) +// .collect::>(); + +// c.bench_function("encode/decode AssetCompleteDetails (1000 items)", |b| { +// b.iter(|| { +// assets +// .iter() +// .map(|a| serialize(&a).unwrap()) +// .map(|b| deserialize::(&b).unwrap()) +// .collect::>() +// }) +// }); +// c.bench_function("encode/decode dynamic details (1000 items)", |b| { +// b.iter(|| { +// dclone +// .iter() +// .map(|a| serialize(&a).unwrap()) +// .map(|b| deserialize::(&b).unwrap()) +// .collect::>() +// }) +// }); +// } -criterion_group!(benches, bincode_decode_benchmark); +criterion_group!( + benches, + bincode_decode_benchmark, + flatbuffer_vs_bincode_merge_functions_benchmark +); criterion_main!(benches); diff --git a/rocks-db/src/asset.rs b/rocks-db/src/asset.rs index 9362a1228..2a8f9d965 100644 --- a/rocks-db/src/asset.rs +++ b/rocks-db/src/asset.rs @@ -3,27 +3,31 @@ use std::collections::HashMap; use crate::inscriptions::{Inscription, InscriptionData}; use bincode::{deserialize, serialize}; use entities::enums::{ChainMutability, OwnerType, RoyaltyTargetType, SpecificationAssetClass}; -use entities::models::{EditionData, OffChainData, SplMint, TokenAccount, 
UpdateVersion, Updated}; +use entities::models::{ + AssetIndex, EditionData, OffChainData, SplMint, TokenAccount, UpdateVersion, Updated, + UrlWithStatus, +}; +use flatbuffers::{FlatBufferBuilder, WIPOffset}; use rocksdb::MergeOperands; use serde::{Deserialize, Serialize}; use solana_sdk::{hash::Hash, pubkey::Pubkey}; -use std::cmp::Ordering; +use std::cmp::{max, Ordering}; use tracing::{error, warn}; +use crate::asset_generated::asset as fb; use crate::key_encoders::{decode_pubkey, decode_u64_pubkey, encode_pubkey, encode_u64_pubkey}; use crate::Result; use crate::TypedColumn; +const MAX_OTHER_OWNERS: usize = 10; + #[derive(Debug)] pub struct AssetSelectedMaps { - pub assets_static: HashMap, - pub assets_dynamic: HashMap, - pub assets_authority: HashMap, - pub assets_collection: HashMap, - pub assets_owner: HashMap, + pub asset_complete_details: HashMap, + pub mpl_core_collections: HashMap, pub assets_leaf: HashMap, pub offchain_data: HashMap, - pub urls: HashMap, + pub urls: HashMap, pub editions: HashMap, pub inscriptions: HashMap, pub inscriptions_data: HashMap, @@ -31,10 +35,1090 @@ pub struct AssetSelectedMaps { pub spl_mints: HashMap, } +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] +pub struct AssetCompleteDetails { + pub pubkey: Pubkey, + pub static_details: Option, + pub dynamic_details: Option, + pub authority: Option, + pub owner: Option, + pub collection: Option, +} + +impl From for AssetCompleteDetails { + fn from(value: AssetStaticDetails) -> Self { + Self { + pubkey: value.pubkey, + static_details: Some(value.clone()), + dynamic_details: None, + authority: None, + owner: None, + collection: None, + } + } +} + +impl AssetCompleteDetails { + pub fn convert_to_fb_bytes(&self) -> Vec { + let mut builder = FlatBufferBuilder::new(); + let asset_complete_details = self.convert_to_fb(&mut builder); + builder.finish_minimal(asset_complete_details); + builder.finished_data().to_vec() + } + + pub fn convert_to_fb<'a>( + &self, + builder: 
&mut FlatBufferBuilder<'a>, + ) -> WIPOffset> { + let pk = Some(builder.create_vector(&self.pubkey.to_bytes())); + let static_details = self + .static_details + .as_ref() + .map(|sd| asset_static_details_to_fb(builder, sd)); + let dynamic_details = self + .dynamic_details + .as_ref() + .map(|dd| asset_dynamic_details_to_fb(builder, dd)); + let authority = self + .authority + .as_ref() + .map(|a| asset_authority_to_fb(builder, a)); + let owner = self.owner.as_ref().map(|o| asset_owner_to_fb(builder, o)); + let collection = self + .collection + .as_ref() + .map(|c| asset_collection_to_fb(builder, c)); + fb::AssetCompleteDetails::create( + builder, + &fb::AssetCompleteDetailsArgs { + pubkey: pk, + static_details, + dynamic_details, + authority, + owner, + collection, + other_known_owners: None, + }, + ) + } +} + +impl<'a> From> for AssetCompleteDetails { + fn from(value: fb::AssetCompleteDetails<'a>) -> Self { + let pubkey = Pubkey::try_from(value.pubkey().unwrap().bytes()).unwrap(); + AssetCompleteDetails { + pubkey, + static_details: value.static_details().map(From::from), + dynamic_details: value.dynamic_details().map(From::from), + authority: value.authority().map(From::from), + owner: value.owner().map(From::from), + collection: value.collection().map(From::from), + } + } +} + +impl<'a> From> for AssetStaticDetails { + fn from(value: fb::AssetStaticDetails<'a>) -> Self { + let pubkey = Pubkey::try_from(value.pubkey().unwrap().bytes()).unwrap(); + let edition_address = value + .edition_address() + .map(|ea| Pubkey::try_from(ea.bytes()).unwrap()); + AssetStaticDetails { + pubkey, + specification_asset_class: value.specification_asset_class().into(), + royalty_target_type: value.royalty_target_type().into(), + created_at: value.created_at(), + edition_address, + } + } +} + +impl<'a> From> for AssetDynamicDetails { + fn from(value: fb::AssetDynamicDetails<'a>) -> Self { + let pubkey = Pubkey::try_from(value.pubkey().unwrap().bytes()).unwrap(); + let is_compressible = 
value.is_compressible().map(updated_bool_from_fb).unwrap(); + let is_compressed = value.is_compressed().map(updated_bool_from_fb).unwrap(); + let is_frozen = value.is_frozen().map(updated_bool_from_fb).unwrap(); + let supply = value.supply().map(updated_u64_from_fb); + let seq = value.seq().map(updated_u64_from_fb); + let is_burnt = value.is_burnt().map(updated_bool_from_fb).unwrap(); + let was_decompressed = value.was_decompressed().map(updated_bool_from_fb); + let onchain_data = value.onchain_data().and_then(updated_string_from_fb); + let creators = value.creators().map(updated_creators_from_fb).unwrap(); + let royalty_amount = value.royalty_amount().map(updated_u16_from_fb).unwrap(); + let url = value.url().and_then(updated_string_from_fb).unwrap(); + let chain_mutability = value + .chain_mutability() + .map(updated_chain_mutability_from_fb); + let lamports = value.lamports().map(updated_u64_from_fb); + let executable = value.executable().map(updated_bool_from_fb); + let metadata_owner = value.metadata_owner().and_then(updated_string_from_fb); + let raw_name = value.raw_name().and_then(updated_string_from_fb); + let mpl_core_plugins = value.mpl_core_plugins().and_then(updated_string_from_fb); + let mpl_core_unknown_plugins = value + .mpl_core_unknown_plugins() + .and_then(updated_string_from_fb); + let rent_epoch = value.rent_epoch().map(updated_u64_from_fb); + let num_minted = value.num_minted().map(updated_u32_from_fb); + let current_size = value.current_size().map(updated_u32_from_fb); + let plugins_json_version = value.plugins_json_version().map(updated_u32_from_fb); + let mpl_core_external_plugins = value + .mpl_core_external_plugins() + .and_then(updated_string_from_fb); + let mpl_core_unknown_external_plugins = value + .mpl_core_unknown_external_plugins() + .and_then(updated_string_from_fb); + let mint_extensions = value.mint_extensions().and_then(updated_string_from_fb); + AssetDynamicDetails { + pubkey, + is_compressible, + is_compressed, + is_frozen, + 
supply, + seq, + is_burnt, + was_decompressed, + onchain_data, + creators, + royalty_amount, + url, + chain_mutability, + lamports, + executable, + metadata_owner, + raw_name, + mpl_core_plugins, + mpl_core_unknown_plugins, + rent_epoch, + num_minted, + current_size, + plugins_json_version, + mpl_core_external_plugins, + mpl_core_unknown_external_plugins, + mint_extensions, + } + } +} + +impl<'a> From> for AssetAuthority { + fn from(value: fb::AssetAuthority<'a>) -> Self { + let pubkey = Pubkey::try_from(value.pubkey().unwrap().bytes()).unwrap(); + let v; + // using unsafe because the generated code does not have a safe way to get the optional value without default + unsafe { + v = value + ._tab + .get::(fb::AssetAuthority::VT_WRITE_VERSION, None); + } + let authority = Pubkey::try_from(value.authority().unwrap().bytes()).unwrap(); + AssetAuthority { + pubkey, + authority, + slot_updated: value.slot_updated(), + write_version: v, + } + } +} + +impl<'a> From> for AssetOwner { + fn from(value: fb::AssetOwner<'a>) -> Self { + let pubkey = Pubkey::try_from(value.pubkey().unwrap().bytes()).unwrap(); + let owner = value.owner().map(updated_optional_pubkey_from_fb).unwrap(); + let delegate = value + .delegate() + .map(updated_optional_pubkey_from_fb) + .unwrap(); + let owner_type = value.owner_type().map(updated_owner_type_from_fb).unwrap(); + let owner_delegate_seq = value + .owner_delegate_seq() + .map(updated_optional_u64_from_fb) + .unwrap(); + let is_current_owner = value + .is_current_owner() + .map(updated_bool_from_fb) + .unwrap_or_default(); + AssetOwner { + pubkey, + owner, + delegate, + owner_type, + owner_delegate_seq, + is_current_owner, + } + } +} + +impl<'a> From> for AssetCollection { + fn from(value: fb::AssetCollection<'a>) -> Self { + let pubkey = Pubkey::try_from(value.pubkey().unwrap().bytes()).unwrap(); + let collection = value.collection().map(updated_pubkey_from_fb).unwrap(); + let is_collection_verified = value + .is_collection_verified() + 
.map(updated_bool_from_fb) + .unwrap(); + let authority = value + .authority() + .map(updated_optional_pubkey_from_fb) + .unwrap(); + AssetCollection { + pubkey, + collection, + is_collection_verified, + authority, + } + } +} + +impl AssetStaticDetails { + pub fn convert_to_fb<'a>( + &self, + builder: &mut FlatBufferBuilder<'a>, + ) -> WIPOffset> { + let pk = Some(builder.create_vector(&self.pubkey.to_bytes())); + let static_details = asset_static_details_to_fb(builder, self); + fb::AssetCompleteDetails::create( + builder, + &fb::AssetCompleteDetailsArgs { + pubkey: pk, + static_details: Some(static_details), + dynamic_details: None, + authority: None, + owner: None, + collection: None, + other_known_owners: None, + }, + ) + } +} + +impl AssetDynamicDetails { + pub fn convert_to_fb<'a>( + &self, + builder: &mut FlatBufferBuilder<'a>, + ) -> WIPOffset> { + let pk = Some(builder.create_vector(&self.pubkey.to_bytes())); + let dynamic_details = Some(asset_dynamic_details_to_fb(builder, self)); + fb::AssetCompleteDetails::create( + builder, + &fb::AssetCompleteDetailsArgs { + pubkey: pk, + static_details: None, + dynamic_details, + authority: None, + owner: None, + collection: None, + other_known_owners: None, + }, + ) + } +} + +impl AssetAuthority { + pub fn convert_to_fb<'a>( + &self, + builder: &mut FlatBufferBuilder<'a>, + ) -> WIPOffset> { + let pubkey = Some(builder.create_vector(&self.pubkey.to_bytes())); + let authority = Some(asset_authority_to_fb(builder, self)); + fb::AssetCompleteDetails::create( + builder, + &fb::AssetCompleteDetailsArgs { + pubkey, + static_details: None, + dynamic_details: None, + authority, + owner: None, + collection: None, + other_known_owners: None, + }, + ) + } +} + +impl AssetOwner { + pub fn convert_to_fb<'a>( + &self, + builder: &mut FlatBufferBuilder<'a>, + ) -> WIPOffset> { + let pubkey = Some(builder.create_vector(&self.pubkey.to_bytes())); + let owner = Some(asset_owner_to_fb(builder, self)); + 
fb::AssetCompleteDetails::create( + builder, + &fb::AssetCompleteDetailsArgs { + pubkey, + static_details: None, + dynamic_details: None, + authority: None, + owner, + collection: None, + other_known_owners: None, + }, + ) + } +} + +impl AssetCollection { + pub fn convert_to_fb<'a>( + &self, + builder: &mut FlatBufferBuilder<'a>, + ) -> WIPOffset> { + let pubkey = Some(builder.create_vector(&self.pubkey.to_bytes())); + let collection = Some(asset_collection_to_fb(builder, self)); + fb::AssetCompleteDetails::create( + builder, + &fb::AssetCompleteDetailsArgs { + pubkey, + static_details: None, + dynamic_details: None, + authority: None, + owner: None, + collection, + other_known_owners: None, + }, + ) + } +} + +fn asset_static_details_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + static_details: &AssetStaticDetails, +) -> WIPOffset> { + let pubkey_bytes = pubkey_to_bytes(&static_details.pubkey); + let pubkey_fb = builder.create_vector(&pubkey_bytes); + + let edition_address_fb = static_details.edition_address.as_ref().map(|ea| { + let ea_bytes = pubkey_to_bytes(ea); + builder.create_vector(&ea_bytes) + }); + + fb::AssetStaticDetails::create( + builder, + &fb::AssetStaticDetailsArgs { + pubkey: Some(pubkey_fb), + specification_asset_class: static_details.specification_asset_class.into(), + royalty_target_type: static_details.royalty_target_type.into(), + created_at: static_details.created_at, + edition_address: edition_address_fb, + }, + ) +} + +fn asset_dynamic_details_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + dynamic_details: &AssetDynamicDetails, +) -> WIPOffset> { + let pubkey_bytes = pubkey_to_bytes(&dynamic_details.pubkey); + let pubkey_fb = builder.create_vector(&pubkey_bytes); + + let is_compressible_fb = updated_bool_to_fb(builder, &dynamic_details.is_compressible); + let is_compressed_fb = updated_bool_to_fb(builder, &dynamic_details.is_compressed); + let is_frozen_fb = updated_bool_to_fb(builder, &dynamic_details.is_frozen); + + // Optional 
fields + let supply_fb = dynamic_details + .supply + .as_ref() + .map(|supply| updated_u64_to_fb(builder, supply)); + let seq_fb = dynamic_details + .seq + .as_ref() + .map(|seq| updated_u64_to_fb(builder, seq)); + let is_burnt_fb = updated_bool_to_fb(builder, &dynamic_details.is_burnt); + let was_decompressed_fb = dynamic_details.was_decompressed.as_ref().map(|was_dec| updated_bool_to_fb(builder, was_dec)); + let onchain_data_fb = dynamic_details + .onchain_data + .as_ref() + .map(|onchain_data| updated_string_to_fb(builder, onchain_data)); + let creators_fb = updated_creators_to_fb(builder, &dynamic_details.creators); + let royalty_amount_fb = updated_u16_to_u32_fb(builder, &dynamic_details.royalty_amount); + let url_fb = updated_string_to_fb(builder, &dynamic_details.url); + let chain_mutability_fb = dynamic_details + .chain_mutability + .as_ref() + .map(|chain_mutability| updated_chain_mutability_to_fb(builder, chain_mutability)); + let lamports_fb = dynamic_details + .lamports + .as_ref() + .map(|lamports| updated_u64_to_fb(builder, lamports)); + let executable_fb = dynamic_details + .executable + .as_ref() + .map(|executable| updated_bool_to_fb(builder, executable)); + let metadata_owner_fb = dynamic_details + .metadata_owner + .as_ref() + .map(|metadata_owner| updated_string_to_fb(builder, metadata_owner)); + let raw_name_fb = dynamic_details + .raw_name + .as_ref() + .map(|raw_name| updated_string_to_fb(builder, raw_name)); + let mpl_core_plugins_fb = dynamic_details + .mpl_core_plugins + .as_ref() + .map(|mpl_core_plugins| updated_string_to_fb(builder, mpl_core_plugins)); + let mpl_core_unknown_plugins_fb = dynamic_details + .mpl_core_unknown_plugins + .as_ref() + .map(|mpl_core_unknown_plugins| updated_string_to_fb(builder, mpl_core_unknown_plugins)); + let rent_epoch_fb = dynamic_details + .rent_epoch + .as_ref() + .map(|rent_epoch| updated_u64_to_fb(builder, rent_epoch)); + let num_minted_fb = dynamic_details + .num_minted + .as_ref() + 
.map(|num_minted| updated_u32_to_fb(builder, num_minted)); + let current_size_fb = dynamic_details + .current_size + .as_ref() + .map(|current_size| updated_u32_to_fb(builder, current_size)); + let plugins_json_version_fb = dynamic_details + .plugins_json_version + .as_ref() + .map(|plugins_json_version| updated_u32_to_fb(builder, plugins_json_version)); + let mpl_core_external_plugins_fb = dynamic_details + .mpl_core_external_plugins + .as_ref() + .map(|mpl_core_external_plugins| updated_string_to_fb(builder, mpl_core_external_plugins)); + let mpl_core_unknown_external_plugins_fb = dynamic_details + .mpl_core_unknown_external_plugins + .as_ref() + .map(|mpl_core_unknown_external_plugins| { + updated_string_to_fb(builder, mpl_core_unknown_external_plugins) + }); + let mint_extensions_fb = dynamic_details + .mint_extensions + .as_ref() + .map(|mint_extensions| updated_string_to_fb(builder, mint_extensions)); + // Continue converting other fields similarly + + fb::AssetDynamicDetails::create( + builder, + &fb::AssetDynamicDetailsArgs { + pubkey: Some(pubkey_fb), + is_compressible: Some(is_compressible_fb), + is_compressed: Some(is_compressed_fb), + is_frozen: Some(is_frozen_fb), + supply: supply_fb, + seq: seq_fb, + is_burnt: Some(is_burnt_fb), + was_decompressed: was_decompressed_fb, + onchain_data: onchain_data_fb, + creators: Some(creators_fb), + royalty_amount: Some(royalty_amount_fb), + url: Some(url_fb), + chain_mutability: chain_mutability_fb, + lamports: lamports_fb, + executable: executable_fb, + metadata_owner: metadata_owner_fb, + raw_name: raw_name_fb, + mpl_core_plugins: mpl_core_plugins_fb, + mpl_core_unknown_plugins: mpl_core_unknown_plugins_fb, + rent_epoch: rent_epoch_fb, + num_minted: num_minted_fb, + current_size: current_size_fb, + plugins_json_version: plugins_json_version_fb, + mpl_core_external_plugins: mpl_core_external_plugins_fb, + mpl_core_unknown_external_plugins: mpl_core_unknown_external_plugins_fb, + mint_extensions: mint_extensions_fb, 
+ }, + ) +} + +fn asset_authority_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + authority: &AssetAuthority, +) -> WIPOffset> { + let pubkey_bytes = pubkey_to_bytes(&authority.pubkey); + let pubkey_fb = builder.create_vector(&pubkey_bytes); + + let authority_bytes = pubkey_to_bytes(&authority.authority); + let authority_fb = builder.create_vector(&authority_bytes); + + let mut b = fb::AssetAuthorityBuilder::new(builder); + if let Some(wv) = authority.write_version { + b.add_write_version(wv); + } + b.add_slot_updated(authority.slot_updated); + b.add_authority(authority_fb); + b.add_pubkey(pubkey_fb); + b.finish() +} +fn asset_owner_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + owner: &AssetOwner, +) -> WIPOffset> { + let pubkey_bytes = pubkey_to_bytes(&owner.pubkey); + let pubkey_fb = builder.create_vector(&pubkey_bytes); + + let owner_fb = updated_optional_pubkey_to_fb(builder, &owner.owner); + let delegate_fb = updated_optional_pubkey_to_fb(builder, &owner.delegate); + let owner_type_fb = updated_owner_type_to_fb(builder, &owner.owner_type); + let owner_delegate_seq_fb = updated_optional_u64_to_fb(builder, &owner.owner_delegate_seq); + let is_current_owner = updated_bool_to_fb(builder, &owner.is_current_owner); + + fb::AssetOwner::create( + builder, + &fb::AssetOwnerArgs { + pubkey: Some(pubkey_fb), + owner: Some(owner_fb), + delegate: Some(delegate_fb), + owner_type: Some(owner_type_fb), + owner_delegate_seq: Some(owner_delegate_seq_fb), + is_current_owner: Some(is_current_owner), + }, + ) +} +fn asset_collection_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + collection: &AssetCollection, +) -> WIPOffset> { + let pubkey_bytes = pubkey_to_bytes(&collection.pubkey); + let pubkey_fb = builder.create_vector(&pubkey_bytes); + + let collection_fb = updated_pubkey_to_fb(builder, &collection.collection); + let is_collection_verified_fb = updated_bool_to_fb(builder, &collection.is_collection_verified); + let authority_fb = 
updated_optional_pubkey_to_fb(builder, &collection.authority); + + fb::AssetCollection::create( + builder, + &fb::AssetCollectionArgs { + pubkey: Some(pubkey_fb), + collection: Some(collection_fb), + is_collection_verified: Some(is_collection_verified_fb), + authority: Some(authority_fb), + }, + ) +} + +fn updated_bool_from_fb(updated: fb::UpdatedBool) -> Updated { + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: updated.value(), + } +} + +fn updated_bool_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + + fb::UpdatedBool::create( + builder, + &fb::UpdatedBoolArgs { + slot_updated: updated.slot_updated, + update_version: Some(update_version), + value: updated.value, + }, + ) +} + +fn updated_u64_from_fb(updated: fb::UpdatedU64) -> Updated { + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: updated.value(), + } +} + +fn updated_u64_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + + fb::UpdatedU64::create( + builder, + &fb::UpdatedU64Args { + slot_updated: updated.slot_updated, + update_version: Some(update_version), + value: updated.value, + }, + ) +} + +fn updated_u32_from_fb(updated: fb::UpdatedU32) -> Updated { + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: updated.value(), + } +} + +fn updated_u32_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + + fb::UpdatedU32::create( + 
builder, + &fb::UpdatedU32Args { + slot_updated: updated.slot_updated, + update_version: Some(update_version), + value: updated.value, + }, + ) +} + +fn updated_u16_from_fb(updated: fb::UpdatedU32) -> Updated { + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: updated.value() as u16, + } +} + +fn updated_u16_to_u32_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + + fb::UpdatedU32::create( + builder, + &fb::UpdatedU32Args { + slot_updated: updated.slot_updated, + update_version: Some(update_version), + value: updated.value as u32, + }, + ) +} + +fn updated_string_from_fb(updated: fb::UpdatedString) -> Option> { + updated.value().map(|value| Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: value.to_string(), + }) +} + +fn updated_string_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + + let value = builder.create_string(&updated.value); + + fb::UpdatedString::create( + builder, + &fb::UpdatedStringArgs { + slot_updated: updated.slot_updated, + update_version: Some(update_version), + value: Some(value), + }, + ) +} + +fn updated_pubkey_from_fb(updated: fb::UpdatedPubkey) -> Updated { + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: Pubkey::try_from(updated.value().unwrap().bytes()).unwrap(), + } +} + +fn updated_pubkey_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + + let pubkey_bytes = 
pubkey_to_bytes(&updated.value); + let pubkey_fb = builder.create_vector(&pubkey_bytes); + + fb::UpdatedPubkey::create( + builder, + &fb::UpdatedPubkeyArgs { + slot_updated: updated.slot_updated, + update_version: Some(update_version), + value: Some(pubkey_fb), + }, + ) +} + +fn updated_optional_pubkey_from_fb(updated: fb::UpdatedOptionalPubkey) -> Updated> { + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: updated + .value() + .map(|pubkey| Pubkey::try_from(pubkey.bytes()).unwrap()), + } +} + +fn updated_optional_pubkey_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated>, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + + let value = updated.value.as_ref().map(|pubkey| { + let pubkey_bytes = pubkey_to_bytes(pubkey); + builder.create_vector(&pubkey_bytes) + }); + + fb::UpdatedOptionalPubkey::create( + builder, + &fb::UpdatedOptionalPubkeyArgs { + slot_updated: updated.slot_updated, + update_version: Some(update_version), + value, + }, + ) +} +fn updated_optional_u64_from_fb(updated: fb::UpdatedU64) -> Updated> { + let v; + unsafe { + v = updated._tab.get::(fb::UpdatedU64::VT_VALUE, None); + } + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: v, + } +} + +fn updated_optional_u64_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated>, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + let mut ub = fb::UpdatedU64Builder::new(builder); + ub.add_slot_updated(updated.slot_updated); + ub.add_update_version(update_version); + if let Some(value) = updated.value { + ub.add_value(value); + } + ub.finish() +} + +fn creator_from_fb(creator: fb::Creator) -> entities::models::Creator { + entities::models::Creator { + creator: 
Pubkey::try_from(creator.creator().unwrap().bytes()).unwrap(), + creator_verified: creator.creator_verified(), + creator_share: creator.creator_share() as u8, + } +} + +fn updated_creators_from_fb( + updated: fb::UpdatedCreators, +) -> Updated> { + let mut ve = Vec::new(); + if let Some(cc) = updated.value() { + for creator in cc { + ve.push(creator_from_fb(creator)); + } + } + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: ve, + } +} + +fn updated_creators_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated>, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + let mut creators = Vec::with_capacity(updated.value.len()); + for creator in &updated.value { + let pubkey_bytes = pubkey_to_bytes(&creator.creator); + let pubkey_fb = builder.create_vector(&pubkey_bytes); + + let creator_fb = fb::Creator::create( + builder, + &fb::CreatorArgs { + creator: Some(pubkey_fb), + creator_verified: creator.creator_verified, + creator_share: creator.creator_share as u32, + }, + ); + creators.push(creator_fb); + } + + let creators_fb = builder.create_vector(creators.as_slice()); + + fb::UpdatedCreators::create( + builder, + &fb::UpdatedCreatorsArgs { + slot_updated: updated.slot_updated, + update_version: Some(update_version), + value: Some(creators_fb), + }, + ) +} + +fn updated_owner_type_from_fb(updated: fb::UpdatedOwnerType) -> Updated { + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: updated.value().into(), + } +} + +fn updated_owner_type_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + + fb::UpdatedOwnerType::create( + builder, + &fb::UpdatedOwnerTypeArgs { + slot_updated: 
updated.slot_updated, + update_version: Some(update_version), + value: updated.value.into(), + }, + ) +} + +fn updated_chain_mutability_from_fb( + updated: fb::UpdatedChainMutability, +) -> Updated { + Updated { + slot_updated: updated.slot_updated(), + update_version: updated + .update_version() + .and_then(convert_update_version_from_fb), + value: updated.value().into(), + } +} + +fn updated_chain_mutability_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + updated: &Updated, +) -> WIPOffset> { + let update_version = convert_update_version_to_fb(builder, &updated.update_version); + + fb::UpdatedChainMutability::create( + builder, + &fb::UpdatedChainMutabilityArgs { + slot_updated: updated.slot_updated, + update_version: Some(update_version), + value: updated.value.into(), + }, + ) +} + +fn convert_update_version_to_fb<'a>( + builder: &mut FlatBufferBuilder<'a>, + update_version: &Option, +) -> WIPOffset> { + let (version_type, version_value) = match update_version { + Some(UpdateVersion::Sequence(seq)) => (fb::UpdateVersionType::Sequence, *seq), + Some(UpdateVersion::WriteVersion(wv)) => (fb::UpdateVersionType::WriteVersion, *wv), + None => (fb::UpdateVersionType::None, 0), + }; + + fb::UpdateVersion::create( + builder, + &fb::UpdateVersionArgs { + version_type, + version_value, + }, + ) +} + +fn convert_update_version_from_fb(update_version: fb::UpdateVersion) -> Option { + match update_version.version_type() { + fb::UpdateVersionType::Sequence => { + Some(UpdateVersion::Sequence(update_version.version_value())) + } + fb::UpdateVersionType::WriteVersion => { + Some(UpdateVersion::WriteVersion(update_version.version_value())) + } + fb::UpdateVersionType::None => None, + _ => None, + } +} + +fn pubkey_to_bytes(pubkey: &Pubkey) -> [u8; 32] { + pubkey.to_bytes() +} + +impl From<&AssetDynamicDetails> for AssetCompleteDetails { + fn from(value: &AssetDynamicDetails) -> Self { + Self { + pubkey: value.pubkey, + static_details: None, + dynamic_details: 
Some(value.clone()), + authority: None, + owner: None, + collection: None, + } + } +} + +impl From for AssetCompleteDetails { + fn from(value: AssetAuthority) -> Self { + Self { + pubkey: value.pubkey, + static_details: None, + dynamic_details: None, + authority: Some(value.clone()), + owner: None, + collection: None, + } + } +} + +// impl From for AssetCompleteDetails { +// fn from(value: AssetOwner) -> Self { +// Self { +// pubkey: value.pubkey, // todo: what do I do with this? For token accounts it's wrong +// static_details: None, +// dynamic_details: None, +// authority: None, +// owner: Some(value.clone()), +// collection: None, +// } +// } +// } + +impl From for AssetCompleteDetails { + fn from(value: AssetCollection) -> Self { + Self { + pubkey: value.pubkey, + static_details: None, + dynamic_details: None, + authority: None, + owner: None, + collection: Some(value.clone()), + } + } +} + +impl AssetCompleteDetails { + pub fn get_slot_updated(&self) -> u64 { + // Collect the slot_updated values from all available fields + let slots = [ + self.dynamic_details.as_ref().map(|d| d.get_slot_updated()), + self.authority.as_ref().map(|a| a.slot_updated), + self.owner.as_ref().map(|o| o.get_slot_updated()), + self.collection.as_ref().map(|c| c.get_slot_updated()), + ]; + // Filter out None values and find the maximum slot_updated + slots.iter().filter_map(|&slot| slot).max().unwrap_or(0) + } + + pub fn any_field_is_set(&self) -> bool { + self.static_details.is_some() + || self.dynamic_details.is_some() + || self.authority.is_some() + || self.owner.is_some() + || self.collection.is_some() + } + + pub fn to_index_without_url_checks( + &self, + mpl_core_collections: &HashMap, + ) -> AssetIndex { + AssetIndex { + pubkey: self.pubkey, + specification_version: entities::enums::SpecificationVersions::V1, + specification_asset_class: self + .static_details + .as_ref() + .map(|a| a.specification_asset_class) + .unwrap_or_default(), + royalty_target_type: self + 
.static_details + .as_ref() + .map(|a| a.royalty_target_type) + .unwrap_or_default(), + slot_created: self + .static_details + .as_ref() + .map(|a| a.created_at) + .unwrap_or_default(), + is_compressible: self + .dynamic_details + .as_ref() + .map(|d| d.is_compressible.value) + .unwrap_or_default(), + is_compressed: self + .dynamic_details + .as_ref() + .map(|d| d.is_compressed.value) + .unwrap_or_default(), + is_frozen: self + .dynamic_details + .as_ref() + .map(|d| d.is_frozen.value) + .unwrap_or_default(), + supply: self + .dynamic_details + .as_ref() + .map(|d| d.supply.clone().map(|s| s.value as i64)) + .unwrap_or_default(), + is_burnt: self + .dynamic_details + .as_ref() + .map(|d| d.is_burnt.value) + .unwrap_or_default(), + creators: self + .dynamic_details + .as_ref() + .map(|d| d.creators.clone().value) + .unwrap_or_default(), + royalty_amount: self + .dynamic_details + .as_ref() + .map(|d| d.royalty_amount.value as i64) + .unwrap_or_default(), + slot_updated: self.get_slot_updated() as i64, + metadata_url: self + .dynamic_details + .as_ref() + .map(|d| UrlWithStatus::new(&d.url.value, false)), + authority: self.authority.as_ref().map(|a| a.authority), + owner: self.owner.as_ref().and_then(|o| o.owner.value), + delegate: self.owner.as_ref().and_then(|o| o.delegate.value), + owner_type: self.owner.as_ref().map(|o| o.owner_type.value), + collection: self.collection.as_ref().map(|c| c.collection.value), + is_collection_verified: self + .collection + .as_ref() + .map(|c| c.is_collection_verified.value), + update_authority: self + .collection + .as_ref() + .and_then(|c| mpl_core_collections.get(&c.collection.value)) + .copied(), + fungible_asset_mint: None, + fungible_asset_balance: None, + } + } +} // The following structures are used to store the asset data in the rocksdb database. The data is spread across multiple columns based on the update pattern. 
// The final representation of the asset should be reconstructed by querying the database for the asset and its associated columns. // Some values, like slot_updated should be calculated based on the latest update to the asset. -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct AssetStaticDetails { pub pubkey: Pubkey, pub specification_asset_class: SpecificationAssetClass, @@ -51,7 +1135,7 @@ pub struct AssetStaticDetailsDeprecated { pub created_at: i64, } -#[derive(Serialize, Deserialize, Debug, Clone, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] pub struct AssetDynamicDetails { pub pubkey: Pubkey, pub is_compressible: Updated, @@ -60,7 +1144,7 @@ pub struct AssetDynamicDetails { pub supply: Option>, pub seq: Option>, pub is_burnt: Updated, - pub was_decompressed: Updated, + pub was_decompressed: Option>, pub onchain_data: Option>, pub creators: Updated>, pub royalty_amount: Updated, @@ -105,7 +1189,7 @@ pub struct MetadataMintMap { pub mint_key: Pubkey, } -#[derive(Serialize, Deserialize, Debug, Default, Clone)] +#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] pub struct AssetAuthority { pub pubkey: Pubkey, pub authority: Pubkey, @@ -120,13 +1204,14 @@ pub struct AssetAuthorityDeprecated { pub slot_updated: u64, } -#[derive(Serialize, Deserialize, Debug, Clone, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] pub struct AssetOwner { pub pubkey: Pubkey, pub owner: Updated>, pub delegate: Updated>, pub owner_type: Updated, pub owner_delegate_seq: Updated>, + pub is_current_owner: Updated, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -153,7 +1238,7 @@ pub struct AssetLeaf { pub slot_updated: u64, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct AssetCollection { pub pubkey: Pubkey, pub collection: Updated, @@ -222,6 +1307,63 @@ 
pub(crate) fn update_optional_field( } } +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub struct MplCoreCollectionAuthority { + pub authority: Updated>, +} + +impl TypedColumn for MplCoreCollectionAuthority { + type KeyType = Pubkey; + type ValueType = Self; + const NAME: &'static str = "MPL_CORE_COLLECTION_AUTHORITY"; + + fn encode_key(pubkey: Pubkey) -> Vec { + encode_pubkey(pubkey) + } + + fn decode_key(bytes: Vec) -> Result { + decode_pubkey(bytes) + } +} + +impl MplCoreCollectionAuthority { + pub fn merge( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &MergeOperands, + ) -> Option> { + let mut result: Option = None; + if let Some(existing_val) = existing_val { + match deserialize::(existing_val) { + Ok(value) => { + result = Some(value); + } + Err(e) => { + error!("RocksDB: AssetCollection deserialize existing_val: {}", e) + } + } + } + + for op in operands { + match deserialize::(op) { + Ok(new_val) => { + result = Some(if let Some(mut current_val) = result { + update_field(&mut current_val.authority, &new_val.authority); + current_val + } else { + new_val + }); + } + Err(e) => { + error!("RocksDB: AssetCollection deserialize new_val: {}", e) + } + } + } + + result.and_then(|result| serialize(&result).ok()) + } +} + impl TypedColumn for AssetStaticDetails { type KeyType = Pubkey; type ValueType = Self; @@ -266,6 +1408,20 @@ impl TypedColumn for AssetDynamicDetailsDeprecated { } } +impl TypedColumn for AssetCompleteDetails { + type KeyType = Pubkey; + type ValueType = Self; + const NAME: &'static str = "ASSET_COMPLETE_DETAILS"; + + fn encode_key(pubkey: Pubkey) -> Vec { + encode_pubkey(pubkey) + } + + fn decode_key(bytes: Vec) -> Result { + decode_pubkey(bytes) + } +} + impl TypedColumn for AssetDynamicDetails { type KeyType = Pubkey; type ValueType = Self; @@ -347,11 +1503,68 @@ impl AssetStaticDetails { } } -impl AssetDynamicDetails { - pub fn merge_dynamic_details( +impl AssetCompleteDetails { + pub fn merge_complete_details( 
_new_key: &[u8], existing_val: Option<&[u8]>, operands: &MergeOperands, + ) -> Option> { + Self::merge_complete_details_raw(_new_key, existing_val, operands.iter()) + } + + pub fn merge_raw(existing: &mut Option, operands: &[Self]) { + for op in operands { + if let Some(ref mut current_val) = existing { + // Merge dynamic_details + if let Some(new_dynamic_details) = &op.dynamic_details { + if let Some(ref mut current_dynamic_details) = current_val.dynamic_details { + current_dynamic_details.merge(new_dynamic_details); + } else { + current_val.dynamic_details = Some(new_dynamic_details.to_owned()); + } + } + + // Keep existing static_details if present + if current_val.static_details.is_none() { + current_val.static_details = op.static_details.clone(); + } + + // Merge authority + if let Some(new_authority) = &op.authority { + if let Some(ref mut current_authority) = current_val.authority { + current_authority.merge(new_authority); + } else { + current_val.authority = Some(new_authority.clone()); + } + } + + // Merge owner + if let Some(new_owner) = &op.owner { + if let Some(ref mut current_owner) = current_val.owner { + current_owner.merge(new_owner); + } else { + current_val.owner = Some(new_owner.clone()); + } + } + + // Merge collection + if let Some(new_collection) = &op.collection { + if let Some(ref mut current_collection) = current_val.collection { + current_collection.merge(new_collection); + } else { + current_val.collection = Some(new_collection.clone()); + } + } + } else { + *existing = Some(op.clone()); + } + } + } + + pub fn merge_complete_details_raw<'a>( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: impl Iterator, ) -> Option> { let mut result: Option = None; if let Some(existing_val) = existing_val { @@ -361,7 +1574,7 @@ impl AssetDynamicDetails { } Err(e) => { error!( - "RocksDB: AssetDynamicDetails deserialize existing_val: {}", + "RocksDB: AssetCompleteDetails deserialize existing_val: {}", e ) } @@ -371,96 +1584,1542 @@ impl 
AssetDynamicDetails { for op in operands { match deserialize::(op) { Ok(new_val) => { - result = Some(if let Some(mut current_val) = result { - update_field(&mut current_val.is_compressible, &new_val.is_compressible); - update_field(&mut current_val.is_compressed, &new_val.is_compressed); - update_field(&mut current_val.is_frozen, &new_val.is_frozen); - update_optional_field(&mut current_val.supply, &new_val.supply); - update_optional_field(&mut current_val.seq, &new_val.seq); - update_field(&mut current_val.is_burnt, &new_val.is_burnt); - update_field(&mut current_val.creators, &new_val.creators); - update_field(&mut current_val.royalty_amount, &new_val.royalty_amount); - update_field(&mut current_val.was_decompressed, &new_val.was_decompressed); - update_optional_field(&mut current_val.onchain_data, &new_val.onchain_data); - update_field(&mut current_val.url, &new_val.url); - update_optional_field( - &mut current_val.chain_mutability, - &new_val.chain_mutability, - ); - update_optional_field(&mut current_val.lamports, &new_val.lamports); - update_optional_field(&mut current_val.executable, &new_val.executable); - update_optional_field( - &mut current_val.metadata_owner, - &new_val.metadata_owner, - ); - update_optional_field(&mut current_val.raw_name, &new_val.raw_name); - update_optional_field( - &mut current_val.mpl_core_plugins, - &new_val.mpl_core_plugins, - ); - update_optional_field( - &mut current_val.mpl_core_unknown_plugins, - &new_val.mpl_core_unknown_plugins, - ); - update_optional_field(&mut current_val.num_minted, &new_val.num_minted); - update_optional_field(&mut current_val.current_size, &new_val.current_size); - update_optional_field(&mut current_val.rent_epoch, &new_val.rent_epoch); - update_optional_field( - &mut current_val.plugins_json_version, - &new_val.plugins_json_version, - ); - update_optional_field( - &mut current_val.mpl_core_external_plugins, - &new_val.mpl_core_external_plugins, - ); - update_optional_field( - &mut 
current_val.mpl_core_unknown_external_plugins, - &new_val.mpl_core_unknown_external_plugins, - ); - update_optional_field( - &mut current_val.mint_extensions, - &new_val.mint_extensions, - ); + if let Some(ref mut current_val) = result { + // Merge dynamic_details + if let Some(new_dynamic_details) = new_val.dynamic_details { + if let Some(ref mut current_dynamic_details) = + current_val.dynamic_details + { + current_dynamic_details.merge(&new_dynamic_details); + } else { + current_val.dynamic_details = Some(new_dynamic_details); + } + } - current_val + // Keep existing static_details if present + if current_val.static_details.is_none() { + current_val.static_details = new_val.static_details; + } + + // Merge authority + if let Some(new_authority) = new_val.authority { + if let Some(ref mut current_authority) = current_val.authority { + current_authority.merge(&new_authority); + } else { + current_val.authority = Some(new_authority); + } + } + + // Merge owner + if let Some(new_owner) = new_val.owner { + if let Some(ref mut current_owner) = current_val.owner { + current_owner.merge(&new_owner); + } else { + current_val.owner = Some(new_owner); + } + } + + // Merge collection + if let Some(new_collection) = new_val.collection { + if let Some(ref mut current_collection) = current_val.collection { + current_collection.merge(&new_collection); + } else { + current_val.collection = Some(new_collection); + } + } } else { - new_val - }); + result = Some(new_val); + } } Err(e) => { - error!("RocksDB: AssetDynamicDetails deserialize new_val: {}", e) + error!("RocksDB: AssetCompleteDetails deserialize new_val: {}", e) } } } result.and_then(|result| serialize(&result).ok()) } +} + +pub fn merge_complete_details_fb( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &MergeOperands, +) -> Option> { + merge_complete_details_fb_raw(_new_key, existing_val, operands.iter()) +} +pub fn merge_complete_details_fb_through_proxy<'a>( + _new_key: &[u8], + existing_val: 
Option<&[u8]>, + operands: impl Iterator, +) -> Option> { + let mut existing_val = existing_val + .and_then(|bytes| { + fb::root_as_asset_complete_details(bytes) + .map_err(|e| { + error!( + "RocksDB: AssetCompleteDetails deserialize existing_val: {}", + e + ) + }) + .ok() + }) + .map(AssetCompleteDetails::from); + AssetCompleteDetails::merge_raw( + &mut existing_val, + operands + .filter_map(|op| fb::root_as_asset_complete_details(op).ok()) + .map(AssetCompleteDetails::from) + .collect::>() + .as_slice(), + ); + existing_val.map(|r| { + let mut builder = FlatBufferBuilder::with_capacity(2500); + let tt = r.convert_to_fb(&mut builder); + builder.finish_minimal(tt); + builder.finished_data().to_vec() + }) +} + +macro_rules! create_updated_primitive_offset { + ($func_name:ident, $updated_type:ident, $updated_args:ident) => { + fn $func_name<'a>( + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + updated: &fb::$updated_type<'a>, + ) -> flatbuffers::WIPOffset> { + let update_version_offset = updated.update_version().map(|uv| { + fb::UpdateVersion::create( + builder, + &fb::UpdateVersionArgs { + version_type: uv.version_type(), + version_value: uv.version_value(), + }, + ) + }); + + fb::$updated_type::create( + builder, + &fb::$updated_args { + slot_updated: updated.slot_updated(), + update_version: update_version_offset, + value: updated.value(), + }, + ) + } + }; +} +macro_rules! 
create_updated_offset { + ($func_name:ident, $updated_type:ident, $updated_args:ident, $create_value_fn:expr) => { + fn $func_name<'a>( + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + updated: &fb::$updated_type<'a>, + ) -> flatbuffers::WIPOffset> { + let update_version_offset = updated.update_version().map(|uv| { + fb::UpdateVersion::create( + builder, + &fb::UpdateVersionArgs { + version_type: uv.version_type(), + version_value: uv.version_value(), + }, + ) + }); + + let value_offset = updated + .value() + .map(|value| $create_value_fn(builder, value)); + + fb::$updated_type::create( + builder, + &fb::$updated_args { + slot_updated: updated.slot_updated(), + update_version: update_version_offset, + value: value_offset, + }, + ) + } + }; +} + +create_updated_primitive_offset!(create_updated_bool_offset, UpdatedBool, UpdatedBoolArgs); +create_updated_primitive_offset!(create_updated_u64_offset, UpdatedU64, UpdatedU64Args); +create_updated_primitive_offset!(create_updated_u32_offset, UpdatedU32, UpdatedU32Args); +create_updated_primitive_offset!( + create_updated_chain_mutability_offset, + UpdatedChainMutability, + UpdatedChainMutabilityArgs +); +create_updated_primitive_offset!( + create_updated_owner_type_offset, + UpdatedOwnerType, + UpdatedOwnerTypeArgs +); + +create_updated_offset!( + create_updated_string_offset, + UpdatedString, + UpdatedStringArgs, + create_string_offset +); +create_updated_offset!( + create_updated_pubkey_offset, + UpdatedPubkey, + UpdatedPubkeyArgs, + create_vector_offset +); +create_updated_offset!( + create_updated_optional_pubkey_offset, + UpdatedOptionalPubkey, + UpdatedOptionalPubkeyArgs, + create_vector_offset +); + +fn create_updated_creators_offset<'a>( + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + updated: &fb::UpdatedCreators<'a>, +) -> flatbuffers::WIPOffset> { + let update_version_offset = updated.update_version().map(|uv| { + fb::UpdateVersion::create( + builder, + &fb::UpdateVersionArgs { + version_type: 
uv.version_type(), + version_value: uv.version_value(), + }, + ) + }); + + let value_offset = if let Some(creator_original) = updated.value() { + let mut creators = Vec::with_capacity(creator_original.len()); + for creator in &creator_original { + let pubkey_fb = creator.creator().map(|c| builder.create_vector(c.bytes())); + + let creator_fb = fb::Creator::create( + builder, + &fb::CreatorArgs { + creator: pubkey_fb, + creator_verified: creator.creator_verified(), + creator_share: creator.creator_share(), + }, + ); + creators.push(creator_fb); + } + Some(builder.create_vector(creators.as_slice())) + } else { + None + }; + + fb::UpdatedCreators::create( + builder, + &fb::UpdatedCreatorsArgs { + slot_updated: updated.slot_updated(), + update_version: update_version_offset, + value: value_offset, + }, + ) +} + +pub fn merge_complete_details_fb_simplified( + new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &MergeOperands, +) -> Option> { + merge_complete_details_fb_simple_raw(new_key, existing_val, operands.iter()) +} + +#[derive(Clone)] +struct FbOwnerContainer<'a> { + pubkey: Option>, + owner: Option>, + delegate: Option>, + owner_type: Option>, + owner_delegate_seq: Option>, + is_current_owner: Option>, +} + +impl<'a> From> for FbOwnerContainer<'a> { + fn from(value: fb::AssetOwner<'a>) -> Self { + Self { + pubkey: value.pubkey(), + owner: value.owner(), + delegate: value.delegate(), + owner_type: value.owner_type(), + owner_delegate_seq: value.owner_delegate_seq(), + is_current_owner: value.is_current_owner(), + } + } +} + +pub fn merge_complete_details_fb_simple_raw<'a>( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: impl Iterator, +) -> Option> { + let existing_val = existing_val.and_then(|bytes| { + fb::root_as_asset_complete_details(bytes) + .map_err(|e| { + error!( + "RocksDB: AssetCompleteDetails deserialize existing_val: {}", + e + ) + }) + .ok() + }); + let mut pk = existing_val.and_then(|a| a.pubkey()); + let mut static_details = 
existing_val.and_then(|a| a.static_details()); + // creating a copy of every single field of the rest of the asset fields including the pubkeys to properly select the latest ones and reconstruct the asset + let mut dynamic_details_pubkey = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.pubkey()); + let mut dynamic_details_is_compressible = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.is_compressible()); + let mut dynamic_details_is_compressed = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.is_compressed()); + let mut dynamic_details_is_frozen = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.is_frozen()); + let mut dynamic_details_supply = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.supply()); + let mut dynamic_details_seq = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.seq()); + let mut dynamic_details_is_burnt = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.is_burnt()); + let mut dynamic_details_was_decompressed = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.was_decompressed()); + let mut dynamic_details_onchain_data = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.onchain_data()); + let mut dynamic_details_creators = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.creators()); + let mut dynamic_details_royalty_amount = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.royalty_amount()); + let mut dynamic_details_url = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.url()); + let mut dynamic_details_chain_mutability = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.chain_mutability()); + let mut dynamic_details_lamports = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.lamports()); + let mut dynamic_details_executable = existing_val + .and_then(|a| a.dynamic_details()) + 
.and_then(|d| d.executable()); + let mut dynamic_details_metadata_owner = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.metadata_owner()); + let mut dynamic_details_raw_name = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.raw_name()); + let mut dynamic_details_mpl_core_plugins = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.mpl_core_plugins()); + let mut dynamic_details_mpl_core_unknown_plugins = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.mpl_core_unknown_plugins()); + let mut dynamic_details_rent_epoch = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.rent_epoch()); + let mut dynamic_details_num_minted = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.num_minted()); + let mut dynamic_details_current_size = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.current_size()); + let mut dynamic_details_plugins_json_version = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.plugins_json_version()); + let mut dynamic_details_mpl_core_external_plugins = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.mpl_core_external_plugins()); + let mut dynamic_details_mpl_core_unknown_external_plugins = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.mpl_core_unknown_external_plugins()); + let mut dynamic_details_mint_extensions = existing_val + .and_then(|a| a.dynamic_details()) + .and_then(|d| d.mint_extensions()); + let mut authority = existing_val.and_then(|a| a.authority()); + let mut owner_pubkey = existing_val + .and_then(|a| a.owner()) + .and_then(|d| d.pubkey()); + let mut owner_owner = existing_val.and_then(|a| a.owner()).and_then(|d| d.owner()); + let mut owner_delegate = existing_val + .and_then(|a| a.owner()) + .and_then(|d| d.delegate()); + let mut owner_owner_type = existing_val + .and_then(|a| a.owner()) + .and_then(|d| d.owner_type()); + let mut 
owner_owner_delegate_seq = existing_val + .and_then(|a| a.owner()) + .and_then(|d| d.owner_delegate_seq()); + let mut owner_is_current_owner = existing_val + .and_then(|a| a.owner()) + .and_then(|d| d.is_current_owner()); + let mut collection_pubkey = existing_val + .and_then(|a| a.collection()) + .and_then(|d| d.pubkey()); + let mut collection_collection = existing_val + .and_then(|a| a.collection()) + .and_then(|d| d.collection()); + let mut collection_is_collection_verified = existing_val + .and_then(|a| a.collection()) + .and_then(|d| d.is_collection_verified()); + let mut collection_authority = existing_val + .and_then(|a| a.collection()) + .and_then(|d| d.authority()); + let mut specification_asset_class = existing_val + .and_then(|a| a.static_details()) + .map(|d| d.specification_asset_class()); + + // With the owner the merge is a bit more complex because we need to check if the owner is a new owner or an existing one + // The following cases are possible: + // 1. The owner in operand is None, in this case we don't need to update the owner + // 2. The owner in operand is Some, but the existing owner is None, in this case we set the new owner + // 3. Both the owner in operand and the existing owner are Some, in this case we have several subcases: + // 3.1. The owner in operand has the same pubkey as the existing owner, in this case we need to merge the owner fields and no updates to the other known owners is needed (TODO: alternative is to update the owner with the latest fields and merge the new/old owner with the other known owners based on the owner value) + // 3.2. The owner in operand has a different pubkey than the existing owner, in this case we need to check if the new owner is newer than the existing owner and update the owner fields accordingly. 
The newer owner should be set as the owner and the other known owners should be merged with the older owner fields + // The same owner pubkey in both the main owner field, or in the other known owners should be merged based on those: first we do compare the update_version and if the update_version is the same we compare the slot_updated + // For different pubkeys we first merge the owners with matching record in the other known owners field. If there is no match we add the new owner to the other known owners field and compare the slot updated to determine the latest owner with the is_current_owner set to true. If the slot_updated is the same we declare the owner with the is_current_owner = true. If both have the same slot_updated and is_current_owner we keep the existing owner, putting the other on top of the other known owners. + // If after appending to the other known owners the vector is bigger than 10, we remove the oldest elements from the vector to keep it at 10 elements, unless the oldest element has the same slot_updated as the current owner, in this case we keep everything as is. 
+ + // Collect existing owner and other known owners + // let mut owner = existing_val.and_then(|a| a.owner()); + let mut other_known_owners = existing_val + .and_then(|a| a.other_known_owners()) + .map(|vec| { + vec.iter() + .map(|v| (v.pubkey().map(|k| k.bytes()).unwrap_or_default(), v.into())) + .collect::>() + }) + .unwrap_or_else(HashMap::new); + for op in operands { + if let Ok(new_val) = fb::root_as_asset_complete_details(op) { + if pk.is_none() { + pk = new_val.pubkey(); + } + // Keep existing static_details if present, but if the exising asset class was fungible and the new asset class is one of the NFT types, update the asset class as it's a known case for TokenMetadata + match static_details { + Some(existing_static_details) => { + if let Some(new_static_details) = new_val.static_details() { + if (existing_static_details.specification_asset_class() + == fb::SpecificationAssetClass::FungibleToken + || existing_static_details.specification_asset_class() + == fb::SpecificationAssetClass::FungibleAsset) + && (new_static_details.specification_asset_class() + == fb::SpecificationAssetClass::Nft + || new_static_details.specification_asset_class() + == fb::SpecificationAssetClass::ProgrammableNft) + { + specification_asset_class = + Some(new_static_details.specification_asset_class()); + } + } + } + None => { + static_details = new_val.static_details(); + } + } + // Merge dynamic_details + if let Some(new_dynamic_details) = new_val.dynamic_details() { + if dynamic_details_pubkey.is_none() { + dynamic_details_pubkey = new_dynamic_details.pubkey() + } + merge_field( + &mut dynamic_details_is_compressible, + new_dynamic_details.is_compressible(), + ); + merge_field( + &mut dynamic_details_is_compressed, + new_dynamic_details.is_compressed(), + ); + merge_field( + &mut dynamic_details_is_frozen, + new_dynamic_details.is_frozen(), + ); + merge_field(&mut dynamic_details_supply, new_dynamic_details.supply()); + merge_field(&mut dynamic_details_seq, 
new_dynamic_details.seq()); + merge_field( + &mut dynamic_details_is_burnt, + new_dynamic_details.is_burnt(), + ); + merge_field( + &mut dynamic_details_was_decompressed, + new_dynamic_details.was_decompressed(), + ); + merge_field( + &mut dynamic_details_onchain_data, + new_dynamic_details.onchain_data(), + ); + merge_field( + &mut dynamic_details_creators, + new_dynamic_details.creators(), + ); + merge_field( + &mut dynamic_details_royalty_amount, + new_dynamic_details.royalty_amount(), + ); + merge_field(&mut dynamic_details_url, new_dynamic_details.url()); + merge_field( + &mut dynamic_details_chain_mutability, + new_dynamic_details.chain_mutability(), + ); + merge_field( + &mut dynamic_details_lamports, + new_dynamic_details.lamports(), + ); + merge_field( + &mut dynamic_details_executable, + new_dynamic_details.executable(), + ); + merge_field( + &mut dynamic_details_metadata_owner, + new_dynamic_details.metadata_owner(), + ); + merge_field( + &mut dynamic_details_raw_name, + new_dynamic_details.raw_name(), + ); + merge_field( + &mut dynamic_details_mpl_core_plugins, + new_dynamic_details.mpl_core_plugins(), + ); + merge_field( + &mut dynamic_details_mpl_core_unknown_plugins, + new_dynamic_details.mpl_core_unknown_plugins(), + ); + merge_field( + &mut dynamic_details_rent_epoch, + new_dynamic_details.rent_epoch(), + ); + merge_field( + &mut dynamic_details_num_minted, + new_dynamic_details.num_minted(), + ); + merge_field( + &mut dynamic_details_current_size, + new_dynamic_details.current_size(), + ); + merge_field( + &mut dynamic_details_plugins_json_version, + new_dynamic_details.plugins_json_version(), + ); + merge_field( + &mut dynamic_details_mpl_core_external_plugins, + new_dynamic_details.mpl_core_external_plugins(), + ); + merge_field( + &mut dynamic_details_mpl_core_unknown_external_plugins, + new_dynamic_details.mpl_core_unknown_external_plugins(), + ); + merge_field( + &mut dynamic_details_mint_extensions, + new_dynamic_details.mint_extensions(), + 
); + } + // Merge authority + if let Some(new_authority) = new_val.authority() { + if authority.map_or(true, |current_authority| { + new_authority.compare(¤t_authority) == Ordering::Greater + }) { + authority = Some(new_authority); + } + } + // Merge owner + if let Some(new_owner) = new_val.owner() { + if let Some(new_owner_pubkey) = new_owner.pubkey() { + // handle the case when the existing owner is missing + if owner_pubkey.is_none() { + owner_pubkey = new_owner.pubkey(); + owner_owner = new_owner.owner(); + owner_delegate = new_owner.delegate(); + owner_owner_type = new_owner.owner_type(); + owner_owner_delegate_seq = new_owner.owner_delegate_seq(); + owner_is_current_owner = new_owner.is_current_owner(); + } else { + // if the owner pubkey is the same we merge the owner fields + if owner_pubkey.map(|k| k.bytes()) == new_owner.pubkey().map(|k| k.bytes()) + { + merge_field(&mut owner_owner, new_owner.owner()); + merge_field(&mut owner_delegate, new_owner.delegate()); + merge_field(&mut owner_owner_type, new_owner.owner_type()); + merge_field( + &mut owner_owner_delegate_seq, + new_owner.owner_delegate_seq(), + ); + merge_field(&mut owner_is_current_owner, new_owner.is_current_owner()); + // after merging the owner fields we may end up with an account that has the is_current_owner set to false, in this case we need to check for an account with is_current_owner set to true inside the other known owners and set it as the owner. We select the account with the highest slot_updated. 
The previous owner should be moved to the other known owners + if !owner_is_current_owner.map(|u| u.value()).unwrap_or(false) { + let best_current_owner_option = { + other_known_owners + .iter() + .filter_map(|(k, v)| { + v.is_current_owner.as_ref().filter(|u| u.value()).map( + |is_owner| { + (is_owner.slot_updated(), k.clone(), v.clone()) + }, + ) + }) + .max_by_key(|(slot, _, _)| *slot) + }; + if let Some((_, new_owner_key, new_current_owner)) = + best_current_owner_option + { + let previous_owner = FbOwnerContainer { + pubkey: owner_pubkey, + owner: owner_owner, + delegate: owner_delegate, + owner_type: owner_owner_type, + owner_delegate_seq: owner_owner_delegate_seq, + is_current_owner: owner_is_current_owner, + }; + other_known_owners + .insert(owner_pubkey.unwrap().bytes(), previous_owner); + owner_pubkey = new_current_owner.pubkey; + owner_owner = new_current_owner.owner; + owner_delegate = new_current_owner.delegate; + owner_owner_type = new_current_owner.owner_type; + owner_owner_delegate_seq = new_current_owner.owner_delegate_seq; + owner_is_current_owner = new_current_owner.is_current_owner; + other_known_owners.remove(new_owner_key); + } + } + } else { + // if the owner pubkey is different it might already be in the other known owners, first we merge it with the one from the other known owners + // then we check which one is newer and put the other one in the other known owners + + let mut merged_owner = FbOwnerContainer::from(new_owner); + if let Some(oldish_owner) = + other_known_owners.get(new_owner_pubkey.bytes()) + { + let mut oldish_owner = oldish_owner.clone(); + merge_field(&mut oldish_owner.owner, new_owner.owner()); + merge_field(&mut oldish_owner.delegate, new_owner.delegate()); + merge_field(&mut oldish_owner.owner_type, new_owner.owner_type()); + merge_field( + &mut oldish_owner.owner_delegate_seq, + new_owner.owner_delegate_seq(), + ); + merge_field( + &mut oldish_owner.is_current_owner, + new_owner.is_current_owner(), + ); + + 
merged_owner = oldish_owner; + } + other_known_owners + .insert(new_owner_pubkey.bytes(), merged_owner.clone()); + + // now the merged owner holds the merged data. We need to check if it's marked as the current owner and if it's newer than the current owner + // if it doesn't have the is current owner set to true we don't need to do anything + if merged_owner + .is_current_owner + .map(|u| u.value()) + .unwrap_or(false) + { + if !owner_is_current_owner.map(|u| u.value()).unwrap_or(false) + || merged_owner + .is_current_owner + .map(|u| u.slot_updated()) + .unwrap_or_default() + > owner_is_current_owner + .map(|u| u.slot_updated()) + .unwrap_or_default() + { + // if the merged owner is newer we set it as the owner and move the old owner into the other known owners + let previous_owner = FbOwnerContainer { + pubkey: owner_pubkey, + owner: owner_owner, + delegate: owner_delegate, + owner_type: owner_owner_type, + owner_delegate_seq: owner_owner_delegate_seq, + is_current_owner: owner_is_current_owner, + }; + other_known_owners + .insert(owner_pubkey.unwrap().bytes(), previous_owner); + owner_pubkey = merged_owner.pubkey; + owner_owner = merged_owner.owner; + owner_delegate = merged_owner.delegate; + owner_owner_type = merged_owner.owner_type; + owner_owner_delegate_seq = merged_owner.owner_delegate_seq; + owner_is_current_owner = merged_owner.is_current_owner; + other_known_owners.remove(new_owner_pubkey.bytes()); + } + } + } + } + } + } + + // Merge collection + if let Some(new_collection) = new_val.collection() { + if collection_pubkey.is_none() { + collection_pubkey = new_collection.pubkey(); + } + merge_field(&mut collection_collection, new_collection.collection()); + merge_field( + &mut collection_is_collection_verified, + new_collection.is_collection_verified(), + ); + merge_field(&mut collection_authority, new_collection.authority()); + } + } + } + pk?; + let mut builder = FlatBufferBuilder::with_capacity(2500); + + let pk: Option>> = + pk.map(|k| 
builder.create_vector(k.bytes())); + let static_details = static_details.map(|s| { + let args = fb::AssetStaticDetailsArgs { + pubkey: s.pubkey().map(|k| builder.create_vector(k.bytes())), + specification_asset_class: specification_asset_class + .unwrap_or(s.specification_asset_class()), + royalty_target_type: s.royalty_target_type(), + created_at: s.created_at(), + edition_address: s + .edition_address() + .map(|k| builder.create_vector(k.bytes())), + }; + fb::AssetStaticDetails::create(&mut builder, &args) + }); + + let dynamic_details = dynamic_details_pubkey.map(|d| { + let args = fb::AssetDynamicDetailsArgs { + pubkey: Some(builder.create_vector(d.bytes())), + is_compressible: dynamic_details_is_compressible + .map(|u| create_updated_bool_offset(&mut builder, &u)), + is_compressed: dynamic_details_is_compressed + .map(|u| create_updated_bool_offset(&mut builder, &u)), + is_frozen: dynamic_details_is_frozen + .map(|u| create_updated_bool_offset(&mut builder, &u)), + supply: dynamic_details_supply.map(|u| create_updated_u64_offset(&mut builder, &u)), + seq: dynamic_details_seq.map(|u| create_updated_u64_offset(&mut builder, &u)), + is_burnt: dynamic_details_is_burnt + .map(|u| create_updated_bool_offset(&mut builder, &u)), + was_decompressed: dynamic_details_was_decompressed + .map(|u| create_updated_bool_offset(&mut builder, &u)), + onchain_data: dynamic_details_onchain_data + .map(|u| create_updated_string_offset(&mut builder, &u)), + creators: dynamic_details_creators + .map(|u| create_updated_creators_offset(&mut builder, &u)), + royalty_amount: dynamic_details_royalty_amount + .map(|u| create_updated_u32_offset(&mut builder, &u)), + url: dynamic_details_url.map(|u| create_updated_string_offset(&mut builder, &u)), + chain_mutability: dynamic_details_chain_mutability + .map(|u| create_updated_chain_mutability_offset(&mut builder, &u)), + lamports: dynamic_details_lamports.map(|u| create_updated_u64_offset(&mut builder, &u)), + executable: 
dynamic_details_executable + .map(|u| create_updated_bool_offset(&mut builder, &u)), + metadata_owner: dynamic_details_metadata_owner + .map(|u| create_updated_string_offset(&mut builder, &u)), + raw_name: dynamic_details_raw_name + .map(|u| create_updated_string_offset(&mut builder, &u)), + mpl_core_plugins: dynamic_details_mpl_core_plugins + .map(|u| create_updated_string_offset(&mut builder, &u)), + mpl_core_unknown_plugins: dynamic_details_mpl_core_unknown_plugins + .map(|u| create_updated_string_offset(&mut builder, &u)), + rent_epoch: dynamic_details_rent_epoch + .map(|u| create_updated_u64_offset(&mut builder, &u)), + num_minted: dynamic_details_num_minted + .map(|u| create_updated_u32_offset(&mut builder, &u)), + current_size: dynamic_details_current_size + .map(|u| create_updated_u32_offset(&mut builder, &u)), + plugins_json_version: dynamic_details_plugins_json_version + .map(|u| create_updated_u32_offset(&mut builder, &u)), + mpl_core_external_plugins: dynamic_details_mpl_core_external_plugins + .map(|u| create_updated_string_offset(&mut builder, &u)), + mpl_core_unknown_external_plugins: dynamic_details_mpl_core_unknown_external_plugins + .map(|u| create_updated_string_offset(&mut builder, &u)), + mint_extensions: dynamic_details_mint_extensions + .map(|u| create_updated_string_offset(&mut builder, &u)), + }; + fb::AssetDynamicDetails::create(&mut builder, &args) + }); + let authority = authority.map(|a| { + let write_version = unsafe { + a._tab + .get::(fb::AssetAuthority::VT_WRITE_VERSION, None) + }; + let auth = a.authority().map(|x| builder.create_vector(x.bytes())); + let pk = a.pubkey().map(|x| builder.create_vector(x.bytes())); + let mut auth_builder = fb::AssetAuthorityBuilder::new(&mut builder); + if let Some(wv) = write_version { + auth_builder.add_write_version(wv); + } + auth_builder.add_slot_updated(a.slot_updated()); + if let Some(x) = auth { + auth_builder.add_authority(x); + } + if let Some(x) = pk { + auth_builder.add_pubkey(x); + } + 
auth_builder.finish() + }); + + let owner = owner_pubkey.map(|k| { + let args = fb::AssetOwnerArgs { + pubkey: Some(builder.create_vector(k.bytes())), + owner: owner_owner.map(|u| create_updated_optional_pubkey_offset(&mut builder, &u)), + delegate: owner_delegate + .map(|u| create_updated_optional_pubkey_offset(&mut builder, &u)), + owner_type: owner_owner_type + .map(|u| create_updated_owner_type_offset(&mut builder, &u)), + owner_delegate_seq: owner_owner_delegate_seq + .map(|u| create_updated_u64_offset(&mut builder, &u)), + is_current_owner: owner_is_current_owner + .map(|u| create_updated_bool_offset(&mut builder, &u)), + }; + fb::AssetOwner::create(&mut builder, &args) + }); + let collection = collection_pubkey.map(|c| { + let args = fb::AssetCollectionArgs { + pubkey: Some(builder.create_vector(c.bytes())), + collection: collection_collection + .map(|u| create_updated_pubkey_offset(&mut builder, &u)), + is_collection_verified: collection_is_collection_verified + .map(|u| create_updated_bool_offset(&mut builder, &u)), + authority: collection_authority + .map(|u| create_updated_optional_pubkey_offset(&mut builder, &u)), + }; + fb::AssetCollection::create(&mut builder, &args) + }); + // + + // Create other_known_owners offset + let other_known_owners_offset = if !other_known_owners.is_empty() { + // Get the updated_slot of the main owner + let owner_owner_updated_slot = owner_owner.as_ref().map(|u| u.slot_updated()).unwrap_or(0); + + // Collect and sort the other known owners + let mut owners_vec: Vec<_> = other_known_owners.values().collect(); + + // Sort the owners by `owner.slot_updated()` descending + owners_vec.sort_by(|a, b| { + let a_slot = a.owner.as_ref().map(|u| u.slot_updated()).unwrap_or(0); + let b_slot = b.owner.as_ref().map(|u| u.slot_updated()).unwrap_or(0); + + // Compare slots in descending order + b_slot.cmp(&a_slot).then_with(|| { + // If slots are equal, compare pubkeys in ascending order + let a_pubkey = a.pubkey.as_ref().map(|k| 
k.bytes()); + let b_pubkey = b.pubkey.as_ref().map(|k| k.bytes()); + + a_pubkey.cmp(&b_pubkey) + }) + }); + + // Initialize a vector to hold the selected owners + let mut offsets = Vec::new(); + + let top_slot_to_keep = max( + owner_owner_updated_slot, + owners_vec + .first() + .and_then(|o| o.owner) + .map(|o| o.slot_updated()) + .unwrap_or(0), + ); + // Iterate through the sorted owners + for owner in owners_vec { + let owner_slot = owner.owner.as_ref().map(|u| u.slot_updated()).unwrap_or(0); + if (owner_slot < top_slot_to_keep) && (offsets.len() >= MAX_OTHER_OWNERS) { + // Reached MAX_OTHER_OWNERS limit + break; + } + offsets.push(create_owner_offset(&mut builder, owner)); + } + Some(builder.create_vector(&offsets)) + } else { + None + }; + + let res = fb::AssetCompleteDetails::create( + &mut builder, + &fb::AssetCompleteDetailsArgs { + pubkey: pk, + static_details, + dynamic_details, + authority, + owner, + collection, + other_known_owners: other_known_owners_offset, + }, + ); + builder.finish_minimal(res); + Some(builder.finished_data().to_vec()) +} + +fn create_owner_offset<'a>( + builder: &mut FlatBufferBuilder<'a>, + owner: &FbOwnerContainer<'a>, +) -> WIPOffset> { + let pubkey_offset = owner.pubkey.map(|k| builder.create_vector(k.bytes())); + + let owner_field = owner + .owner + .map(|o| create_updated_optional_pubkey_offset(builder, &o)); + + let delegate_field = owner + .delegate + .map(|d| create_updated_optional_pubkey_offset(builder, &d)); + + let owner_type_field = owner + .owner_type + .map(|ot| create_updated_owner_type_offset(builder, &ot)); + + let owner_delegate_seq_field = owner + .owner_delegate_seq + .map(|seq| create_updated_u64_offset(builder, &seq)); + + let is_current_owner_field = owner + .is_current_owner + .map(|ico| create_updated_bool_offset(builder, &ico)); + + fb::AssetOwner::create( + builder, + &fb::AssetOwnerArgs { + pubkey: pubkey_offset, + owner: owner_field, + delegate: delegate_field, + owner_type: owner_type_field, + 
owner_delegate_seq: owner_delegate_seq_field, + is_current_owner: is_current_owner_field, + }, + ) +} + +fn merge_field<'a, T>(existing_field: &mut Option, new_field: Option) +where + T: PartialOrd + 'a, +{ + if let Some(new_val) = new_field { + match existing_field { + None => { + *existing_field = Some(new_val); + } + Some(existing_value) => { + if new_val.partial_cmp(existing_value) == Some(std::cmp::Ordering::Greater) { + *existing_field = Some(new_val); + } + } + } + } +} + +pub fn merge_complete_details_fb_raw<'a>( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: impl Iterator, +) -> Option> { + let mut builder = FlatBufferBuilder::with_capacity(2500); + // Deserialize existing value into an iterator + let existing_iter = existing_val + .and_then(|bytes| { + fb::root_as_asset_complete_details(bytes) + .map_err(|e| { + error!( + "RocksDB: AssetCompleteDetails deserialize existing_val: {}", + e + ) + }) + .ok() + }) + .into_iter(); + + // Deserialize operands into an iterator + let operands_iter = operands.filter_map(|bytes| fb::root_as_asset_complete_details(bytes).ok()); + + // Combine existing and operands into a single iterator + let all_assets: Vec<_> = existing_iter.chain(operands_iter).collect(); + + let pubkey = all_assets + .iter() + .filter_map(|asset| asset.pubkey()) + .next() + .map(|k| builder.create_vector(k.bytes())); + pubkey?; + + let static_details = merge_static_details( + &mut builder, + all_assets + .iter() + .filter_map(|a| a.static_details()) + .collect(), + ); + let dynamic_details = merge_dynamic_details( + &mut builder, + all_assets + .iter() + .filter_map(|a| a.dynamic_details()) + .collect(), + ); + let authority = merge_authority( + &mut builder, + all_assets.iter().filter_map(|a| a.authority()).collect(), + ); + let owner = merge_owner( + &mut builder, + all_assets.iter().filter_map(|a| a.owner()).collect(), + ); + let collection = merge_collection( + &mut builder, + all_assets.iter().filter_map(|a| 
a.collection()).collect(), + ); + let res = fb::AssetCompleteDetails::create( + &mut builder, + &fb::AssetCompleteDetailsArgs { + pubkey, + static_details, + dynamic_details, + authority, + owner, + collection, + other_known_owners: None, // todo: if this ever used, we need to implement it + }, + ); + builder.finish_minimal(res); + Some(builder.finished_data().to_vec()) +} + +fn merge_static_details<'a>( + builder: &mut FlatBufferBuilder<'a>, + iter: Vec>, +) -> Option>> { + let pk = iter + .iter() + .cloned() + .filter_map(|asset| asset.pubkey()) + .next() + .map(|k| builder.create_vector(k.bytes())); + pk?; + let args = fb::AssetStaticDetailsArgs { + pubkey: pk, + specification_asset_class: iter + .iter() + .cloned() + .map(|asset| asset.specification_asset_class()) + .next() + .unwrap_or_default(), + royalty_target_type: iter + .iter() + .cloned() + .map(|asset| asset.royalty_target_type()) + .next() + .unwrap_or_default(), + created_at: iter + .iter() + .cloned() + .map(|asset| asset.created_at()) + .next() + .unwrap_or_default(), + edition_address: iter + .iter() + .cloned() + .filter_map(|asset| asset.edition_address()) + .next() + .map(|k| builder.create_vector(k.bytes())), + }; + Some(fb::AssetStaticDetails::create(builder, &args)) +} + +macro_rules! 
merge_updated_primitive { + ($func_name:ident, $updated_type:ident, $updated_args:ident) => { + fn $func_name<'a, T, F>( + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + iter: impl Iterator + DoubleEndedIterator, + extract_fn: F, + ) -> Option>> + where + F: Fn(T) -> Option>, + T: 'a, + { + iter.filter_map(extract_fn) + .rev() // Reverse the iterator for max_by to get the first-most element for the case of multiple equal values + .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)) + .map(|v| { + let version_offset = v.update_version().map(|uv| { + fb::UpdateVersion::create( + builder, + &fb::UpdateVersionArgs { + version_type: uv.version_type(), + version_value: uv.version_value(), + }, + ) + }); + fb::$updated_type::create( + builder, + &fb::$updated_args { + slot_updated: v.slot_updated(), + update_version: version_offset, + value: v.value(), + }, + ) + }) + } + }; +} + +macro_rules! merge_updated_offset { + ($func_name:ident, $updated_type:ident, $updated_args:ident, $value_create_fn:path) => { + fn $func_name<'a, T, F>( + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + iter: impl Iterator + DoubleEndedIterator, + extract_fn: F, + ) -> Option>> + where + F: Fn(T) -> Option>, + T: 'a, + { + iter.filter_map(extract_fn) + .rev() // Reverse the iterator for max_by to get the first-most element for the case of multiple equal values + .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)) + .map(|v| { + let version_offset = v.update_version().map(|uv| { + fb::UpdateVersion::create( + builder, + &fb::UpdateVersionArgs { + version_type: uv.version_type(), + version_value: uv.version_value(), + }, + ) + }); + let value_offset = v.value().map(|value| $value_create_fn(builder, value)); + fb::$updated_type::create( + builder, + &fb::$updated_args { + slot_updated: v.slot_updated(), + update_version: version_offset, + value: value_offset, + }, + ) + }) + } + }; +} + +merge_updated_primitive!(merge_updated_bool, UpdatedBool, 
UpdatedBoolArgs); +merge_updated_primitive!(merge_updated_u64, UpdatedU64, UpdatedU64Args); +merge_updated_primitive!(merge_updated_u32, UpdatedU32, UpdatedU32Args); +merge_updated_primitive!( + merge_updated_chain_mutability, + UpdatedChainMutability, + UpdatedChainMutabilityArgs +); +merge_updated_primitive!( + merge_updated_owner_type, + UpdatedOwnerType, + UpdatedOwnerTypeArgs +); +merge_updated_offset!( + merge_updated_string, + UpdatedString, + UpdatedStringArgs, + create_string_offset +); +fn create_string_offset<'a>( + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + value: &str, +) -> flatbuffers::WIPOffset<&'a str> { + builder.create_string(value) +} + +fn create_vector_offset<'a>( + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + value: flatbuffers::Vector<'a, u8>, +) -> flatbuffers::WIPOffset> { + builder.create_vector(value.bytes()) +} +merge_updated_offset!( + merge_updated_pubkey, + UpdatedPubkey, + UpdatedPubkeyArgs, + create_vector_offset +); +merge_updated_offset!( + merge_updated_optional_pubkey, + UpdatedOptionalPubkey, + UpdatedOptionalPubkeyArgs, + create_vector_offset +); + +fn merge_updated_creators<'a, T, F>( + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + iter: impl Iterator + DoubleEndedIterator, + extract_fn: F, +) -> Option>> +where + F: Fn(T) -> Option>, + T: 'a, +{ + iter.filter_map(extract_fn) + .rev() // Reverse the iterator for max_by to get the first-most element for the case of multiple equal values + .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)) + .map(|v| { + // Create UpdateVersion + let version_offset = v.update_version().map(|uv| { + fb::UpdateVersion::create( + builder, + &fb::UpdateVersionArgs { + version_type: uv.version_type(), + version_value: uv.version_value(), + }, + ) + }); + let creators_fb = if let Some(creator_original) = v.value() { + let mut creators = Vec::with_capacity(creator_original.len()); + for creator in &creator_original { + let pubkey_fb = creator.creator().map(|c| 
builder.create_vector(c.bytes())); + + let creator_fb = fb::Creator::create( + builder, + &fb::CreatorArgs { + creator: pubkey_fb, + creator_verified: creator.creator_verified(), + creator_share: creator.creator_share(), + }, + ); + creators.push(creator_fb); + } + Some(builder.create_vector(creators.as_slice())) + } else { + None + }; + + // Create UpdatedCreators + fb::UpdatedCreators::create( + builder, + &fb::UpdatedCreatorsArgs { + slot_updated: v.slot_updated(), + update_version: version_offset, + value: creators_fb, + }, + ) + }) +} + +fn merge_dynamic_details<'a>( + builder: &mut FlatBufferBuilder<'a>, + iter: Vec>, +) -> Option>> { + let pk = iter + .iter() + .cloned() + .filter_map(|asset| asset.pubkey()) + .next() + .map(|k| builder.create_vector(k.bytes())); + pk?; + let is_compressible = merge_updated_bool(builder, iter.iter().cloned(), |asset| { + asset.is_compressible() + }); + let is_compressed = + merge_updated_bool(builder, iter.iter().cloned(), |asset| asset.is_compressed()); + let is_frozen = merge_updated_bool(builder, iter.iter().cloned(), |asset| asset.is_frozen()); + let supply = merge_updated_u64(builder, iter.iter().cloned(), |asset| asset.supply()); + let seq = merge_updated_u64(builder, iter.iter().cloned(), |asset| asset.seq()); + let is_burnt = merge_updated_bool(builder, iter.iter().cloned(), |asset| asset.is_burnt()); + let was_decompressed = merge_updated_bool(builder, iter.iter().cloned(), |asset| { + asset.was_decompressed() + }); + let onchain_data = + merge_updated_string(builder, iter.iter().cloned(), |asset| asset.onchain_data()); + let creators = merge_updated_creators(builder, iter.iter().cloned(), |a| a.creators()); + let royalty_amount = merge_updated_u32(builder, iter.iter().cloned(), |asset| { + asset.royalty_amount() + }); + let url = merge_updated_string(builder, iter.iter().cloned(), |asset| asset.url()); + let chain_mutability = merge_updated_chain_mutability(builder, iter.iter().cloned(), |asset| { + 
asset.chain_mutability() + }); + let lamports = merge_updated_u64(builder, iter.iter().cloned(), |asset| asset.lamports()); + let executable = merge_updated_bool(builder, iter.iter().cloned(), |asset| asset.executable()); + let metadata_owner = merge_updated_string(builder, iter.iter().cloned(), |asset| { + asset.metadata_owner() + }); + let raw_name = merge_updated_string(builder, iter.iter().cloned(), |asset| asset.raw_name()); + let mpl_core_plugins = merge_updated_string(builder, iter.iter().cloned(), |asset| { + asset.mpl_core_plugins() + }); + let mpl_core_unknown_plugins = merge_updated_string(builder, iter.iter().cloned(), |asset| { + asset.mpl_core_unknown_plugins() + }); + let rent_epoch = merge_updated_u64(builder, iter.iter().cloned(), |asset| asset.rent_epoch()); + let num_minted = merge_updated_u32(builder, iter.iter().cloned(), |asset| asset.num_minted()); + let current_size = + merge_updated_u32(builder, iter.iter().cloned(), |asset| asset.current_size()); + let plugins_json_version = merge_updated_u32(builder, iter.iter().cloned(), |asset| { + asset.plugins_json_version() + }); + let mpl_core_external_plugins = merge_updated_string(builder, iter.iter().cloned(), |asset| { + asset.mpl_core_external_plugins() + }); + let mpl_core_unknown_external_plugins = + merge_updated_string(builder, iter.iter().cloned(), |asset| { + asset.mpl_core_unknown_external_plugins() + }); + let mint_extensions = merge_updated_string(builder, iter.iter().cloned(), |asset| { + asset.mint_extensions() + }); + + Some(fb::AssetDynamicDetails::create( + builder, + &fb::AssetDynamicDetailsArgs { + pubkey: pk, + is_compressible, + is_compressed, + is_frozen, + supply, + seq, + is_burnt, + was_decompressed, + onchain_data, + creators, + royalty_amount, + url, + chain_mutability, + lamports, + executable, + metadata_owner, + raw_name, + mpl_core_plugins, + mpl_core_unknown_plugins, + rent_epoch, + num_minted, + current_size, + plugins_json_version, + mpl_core_external_plugins, + 
mpl_core_unknown_external_plugins, + mint_extensions, + }, + )) +} + +fn merge_authority<'a>( + builder: &mut FlatBufferBuilder<'a>, + iter: Vec>, +) -> Option>> { + let pk = iter + .iter() + .cloned() + .filter_map(|asset| asset.pubkey()) + .next() + .map(|k| builder.create_vector(k.bytes())); + pk?; + iter.iter() + .cloned() + .rev() // Reverse the iterator for max_by to get the first-most element for the case of multiple equal values + .max_by(|a, b| { + if let (Some(a_write_version), Some(b_write_version)) = unsafe { + ( + a._tab + .get::(fb::AssetAuthority::VT_WRITE_VERSION, None), + b._tab + .get::(fb::AssetAuthority::VT_WRITE_VERSION, None), + ) + } { + a_write_version.cmp(&b_write_version) + } else { + a.slot_updated().cmp(&b.slot_updated()) + } + }) + .map(|authority_original| { + let write_version = unsafe { + authority_original + ._tab + .get::(fb::AssetAuthority::VT_WRITE_VERSION, None) + }; + let auth = authority_original + .authority() + .map(|x| builder.create_vector(x.bytes())); + let mut auth_builder = fb::AssetAuthorityBuilder::new(builder); + if let Some(wv) = write_version { + auth_builder.add_write_version(wv); + } + auth_builder.add_slot_updated(authority_original.slot_updated()); + if let Some(x) = auth { + auth_builder.add_authority(x); + } + if let Some(x) = pk { + auth_builder.add_pubkey(x); + } + auth_builder.finish() + }) +} + +fn merge_owner<'a>( + builder: &mut FlatBufferBuilder<'a>, + iter: Vec>, +) -> Option>> { + let pk = iter + .iter() + .cloned() + .filter_map(|owner| owner.pubkey()) + .next() + .map(|k| builder.create_vector(k.bytes())); + pk?; + let owner = merge_updated_optional_pubkey(builder, iter.iter().cloned(), |owner| owner.owner()); + let delegate = + merge_updated_optional_pubkey(builder, iter.iter().cloned(), |owner| owner.delegate()); + let owner_type = + merge_updated_owner_type(builder, iter.iter().cloned(), |owner| owner.owner_type()); + let owner_delegate_seq = merge_updated_u64(builder, iter.iter().cloned(), 
|owner| { + owner.owner_delegate_seq() + }); + let is_current_owner = merge_updated_bool(builder, iter.iter().cloned(), |owner| { + owner.is_current_owner() + }); + + Some(fb::AssetOwner::create( + builder, + &fb::AssetOwnerArgs { + pubkey: pk, + owner, + delegate, + owner_type, + owner_delegate_seq, + is_current_owner, + }, + )) +} +fn merge_collection<'a>( + builder: &mut FlatBufferBuilder<'a>, + iter: Vec>, +) -> Option>> { + let pk = iter + .iter() + .cloned() + .filter_map(|collection| collection.pubkey()) + .next() + .map(|k| builder.create_vector(k.bytes())); + pk?; + let collection = merge_updated_pubkey(builder, iter.iter().cloned(), |collection| { + collection.collection() + }); + let is_collection_verified = merge_updated_bool(builder, iter.iter().cloned(), |collection| { + collection.is_collection_verified() + }); + let authority = merge_updated_optional_pubkey(builder, iter.iter().cloned(), |collection| { + collection.authority() + }); + + Some(fb::AssetCollection::create( + builder, + &fb::AssetCollectionArgs { + pubkey: pk, + collection, + is_collection_verified, + authority, + }, + )) +} + +impl AssetDynamicDetails { + pub fn merge(&mut self, new_val: &Self) { + update_field(&mut self.is_compressible, &new_val.is_compressible); + update_field(&mut self.is_compressed, &new_val.is_compressed); + update_field(&mut self.is_frozen, &new_val.is_frozen); + update_optional_field(&mut self.supply, &new_val.supply); + update_optional_field(&mut self.seq, &new_val.seq); + update_field(&mut self.is_burnt, &new_val.is_burnt); + update_field(&mut self.creators, &new_val.creators); + update_field(&mut self.royalty_amount, &new_val.royalty_amount); + update_optional_field(&mut self.was_decompressed, &new_val.was_decompressed); + update_optional_field(&mut self.onchain_data, &new_val.onchain_data); + update_field(&mut self.url, &new_val.url); + update_optional_field(&mut self.chain_mutability, &new_val.chain_mutability); + update_optional_field(&mut self.lamports, 
&new_val.lamports); + update_optional_field(&mut self.executable, &new_val.executable); + update_optional_field(&mut self.metadata_owner, &new_val.metadata_owner); + update_optional_field(&mut self.raw_name, &new_val.raw_name); + update_optional_field(&mut self.mpl_core_plugins, &new_val.mpl_core_plugins); + update_optional_field( + &mut self.mpl_core_unknown_plugins, + &new_val.mpl_core_unknown_plugins, + ); + update_optional_field(&mut self.num_minted, &new_val.num_minted); + update_optional_field(&mut self.current_size, &new_val.current_size); + update_optional_field(&mut self.rent_epoch, &new_val.rent_epoch); + update_optional_field( + &mut self.plugins_json_version, + &new_val.plugins_json_version, + ); + update_optional_field( + &mut self.mpl_core_external_plugins, + &new_val.mpl_core_external_plugins, + ); + update_optional_field( + &mut self.mpl_core_unknown_external_plugins, + &new_val.mpl_core_unknown_external_plugins, + ); + update_optional_field(&mut self.mint_extensions, &new_val.mint_extensions); + } + + pub fn merge_dynamic_details( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &MergeOperands, + ) -> Option> { + let mut result: Option = None; + + // Deserialize existing value if present + if let Some(existing_val) = existing_val { + match deserialize::(existing_val) { + Ok(value) => result = Some(value), + Err(e) => error!( + "RocksDB: AssetDynamicDetails deserialize existing_val: {}", + e + ), + } + } + + // Iterate over operands and merge + for op in operands { + match deserialize::(op) { + Ok(new_val) => { + if let Some(ref mut current_val) = result { + current_val.merge(&new_val); + } else { + result = Some(new_val); + } + } + Err(e) => error!("RocksDB: AssetDynamicDetails deserialize new_val: {}", e), + } + } + + // Serialize the result back into bytes + result.and_then(|result| serialize(&result).ok()) + } pub fn get_slot_updated(&self) -> u64 { [ self.is_compressible.slot_updated, self.is_compressed.slot_updated, 
self.is_frozen.slot_updated, - self.supply.clone().map_or(0, |supply| supply.slot_updated), - self.seq.clone().map_or(0, |seq| seq.slot_updated), + self.supply.as_ref().map_or(0, |supply| supply.slot_updated), + self.seq.as_ref().map_or(0, |seq| seq.slot_updated), self.is_burnt.slot_updated, - self.was_decompressed.slot_updated, + self.was_decompressed.as_ref().map_or(0, |was_decompressed| was_decompressed.slot_updated), self.onchain_data - .clone() + .as_ref() .map_or(0, |onchain_data| onchain_data.slot_updated), self.creators.slot_updated, self.royalty_amount.slot_updated, self.chain_mutability - .clone() + .as_ref() .map_or(0, |onchain_data| onchain_data.slot_updated), self.lamports - .clone() + .as_ref() .map_or(0, |onchain_data| onchain_data.slot_updated), self.executable - .clone() + .as_ref() .map_or(0, |onchain_data| onchain_data.slot_updated), self.metadata_owner - .clone() + .as_ref() .map_or(0, |onchain_data| onchain_data.slot_updated), ] .into_iter() @@ -470,6 +3129,19 @@ impl AssetDynamicDetails { } impl AssetAuthority { + pub fn merge(&mut self, new_val: &Self) { + if let (Some(self_write_version), Some(new_write_version)) = + (self.write_version, new_val.write_version) + { + if new_write_version > self_write_version { + *self = new_val.to_owned(); + } + } else if new_val.slot_updated > self.slot_updated { + *self = new_val.to_owned(); + } + // If neither condition is met, retain existing `self` + } + pub fn merge_asset_authorities( _new_key: &[u8], existing_val: Option<&[u8]>, @@ -543,8 +3215,14 @@ impl TypedColumn for AssetOwner { decode_pubkey(bytes) } } - impl AssetOwner { + pub fn merge(&mut self, new_val: &Self) { + update_field(&mut self.owner_type, &new_val.owner_type); + update_field(&mut self.owner, &new_val.owner); + update_field(&mut self.owner_delegate_seq, &new_val.owner_delegate_seq); + update_field(&mut self.delegate, &new_val.delegate); + } + pub fn merge_asset_owner( _new_key: &[u8], existing_val: Option<&[u8]>, @@ -693,6 +3371,15 @@ 
impl TypedColumn for AssetCollection { } impl AssetCollection { + pub fn merge(&mut self, new_val: &Self) { + update_field(&mut self.collection, &new_val.collection); + update_field( + &mut self.is_collection_verified, + &new_val.is_collection_verified, + ); + update_field(&mut self.authority, &new_val.authority); + } + pub fn merge_asset_collection( _new_key: &[u8], existing_val: Option<&[u8]>, @@ -714,13 +3401,7 @@ impl AssetCollection { match deserialize::(op) { Ok(new_val) => { result = Some(if let Some(mut current_val) = result { - update_field(&mut current_val.collection, &new_val.collection); - update_field( - &mut current_val.is_collection_verified, - &new_val.is_collection_verified, - ); - update_field(&mut current_val.authority, &new_val.authority); - + current_val.merge(&new_val); current_val } else { new_val @@ -843,3 +3524,535 @@ impl SlotAssetIdxKey { }) } } +#[cfg(test)] +mod tests { + + use super::*; + use entities::models::Creator; + use itertools::Itertools; + use std::{io::Read, str::FromStr}; + + const TEST_DATA:&str = 
"PBoyqt9suREfEjDdGGiAPvsZvpctTPWgKjqrPC59tBpm4Bz33NvdBTG6kWdV6i6XCxHTT1ZFYWY72tUw42zeyhPCWGGy2jA6KZr58rb7VS4Sv1gyRV1xRRUnzsaJQAT8BqSXE57xo3KQnQN1ptiwT9z85w1wq9sFztjUryUBsTQZgkoUZjNFM4gwJRMB2KxH5RpxQSWXx1oxgG68Hr9sr8jwjwoT8PD2Nua12LLJ7X9ik5CZtJFbAAUnpSwhKgLMhLMcriRDUuPCQAzwzu7L5vrFUdSVXXrRNetQR8TaAgg1bvi4FbCLe7a2Q2d3NuNT7WwF88ddxTGiLA38LiebjgNVw4TGNh1LyUQ6JkbEqTZ7UH8zsJe6vJtAGLDgfLiddECw8Xg2hVqWDkYVmJpTn9ozyNcL53upyEfG7SzTRkDepbHg1Pu27Vfaatj8friyoNYqpju8K9ZxNQcttjurrPs5Tid2TzR4SbfWEYvBchCgmxQLT3s4inRi9pigQgN8Xn1WHExfq9JzZLHcFhzxAB3HPy9nhHqjex2RzBigoEshiXuMmQCiFZa5zy8t6m67p9SiAhuUTn9hfcHHYjVqJbNsBaiT2zbDnqAMyKxRsnZT6Zf8cQfM4tRisHWm8x4EGkxDjkMfK89Zyzry579G8vJbCzG16hEAHS16ns7VySoTGQGwZKyLTUxBXnDr3PWTSNuaSTzQ8ykBNjQ3hPjkD4e2H5BTqLWdTrzqRsfXrqygGT64tqPH53YyyAnT4pPdHUXoi3F776seVR9SpJm3daiDAjA6f18eJTHXiAwYf28LmMquZ7m346pZc99ii6wmHVrczUoknyMNK5b2cju7hDPtXQdirdDxt3f37qJo89Dyzp9txkrFz7XqVhdqq9s4tuVYC1jHSMfMfWStDdPWVMT9R3FrjWUuG2gLznraqD7jxGfFaFps6LdeBibTHAnHuT6BqWXN9UUCfvW3x5oBcgwQU9Tj5yFoS7SPkyN8a6s2pmbVM1t2CJdRzaws52JasdAQyVeXZXxK8kkgrbKpcHkdiSr6JCepDgaLaPSxJTVzStsqXyYrwfApmybyGAL2oPQ6aFgopGh76gt81XaGCfDJU1MUrsmby5p5uer4LzQxHCwM1ZnLK5TrkVh2AfxYnPdJ98KtTHFDeJoJpcj5sJPKKfiu6nN24udVphndRrnrgYj1a47UiEGkogqz7pe6PJt4sCArWdS2nDquy7TuKkBJZPbetqSHAiaL4n2RD22N6trq7DtcF4fkPku2Yb6xXppHgVhM9PJEbK3kQnKm4VzTJvTx314LCRUwkVZrzYJ2MYFbB9gXYqyMPcGDLXn8hASKHegisZbxyjJLkmNWMx3jimjUfXNww6aDripnvNPZMB94XJZda5Wd2saQgDc3P9ijPA1geCEeGTEucNe3dJPTSz2jD7MQ6aTvT5uvREmKoWdcUbrn88oveVfCCzuvJGkXhnSSa77ubDgd4aa8eE23vuZ3nZ4ifAYAS7XkwUDSRBPN6A2tRuXpVbWHYpnqEGuDBNA59rL2JCo17HcZDCMXFTTcEjArjU2UHBNqi3cMtVP7gqW639i44bSFrrMk3CCzSN88xganXySAuo4TdVhmrLfhWQsnq5oFzCvYtPXLi6uvE2FBAMm4e5LqXt8HDL6z1AuVfija5uAdR6jSdVMyd67ju9LnLLWJie6y3k8rtGawGb7evzpR5DjVxHQgkFQu68s2ytFsHxjuRSuMxjzJyj1n9zR7psEY3L8P6kGFr3KtGQvBFXCbc9z9mcELxmRmHX6K467jF7EyYdGFZ8D2ATaqvvmLGzBqmpZBjzSFXsioMEZ1HEtXNCanFRVxNALhpfZSfbH5nKkY9PfEesU3ibjrT29F81rxuD8bpHXqePaCNgAoMTfM9KXa5HXJzYMxokS9bFcYPTXFhxWftZ8Ta78GEaEsLNKYY7DJ2rKjNJKjG6KQxUykAJTzJjBi4finu8WBKA99rGq
SVeQo7kMnfdB7aEWusWUuCbbBDc8GVrLvS2paqE6vzD8wfrUiY11nNdzyVqA4ZmVh6wAxGn8frDa2hakSD9kDU7Q34doYA1ekyUMnKoPquEmAdvTL4KcmwsVrG2i5LRuTnvhjmwSSYSYDHCerHkzdQ7qrDEceAvvKUMwERySFTqPq9qZgXoGTvfgDCcgPPXauH41rDAjWmNSrLjJXAANvzqxEUq59NddTPM5cTCfwo8Edr5wweTYdCqjAr1cn3EGKBdhqi4QY3U68dAd4YFR7NAEcUGWyY4KxsCKzJmiNvjPnoitmezhRa1MACA4Bm9zxyDBaBga8EVBSmdDUS9izgZ1WGNi4ZRnJKQZFzw3dnHsfZmEDiUNxnN2wwGudWnYo1sAFTdWQkbfTHeTswRHCNzAsFXhuChmbzTnzfCvxrwjHh7oKvcANdYdEsgzNkFHcz5ebugABNDYaiYCYKMgzPnA7v4854gvfids1TfEBCM6reDysiddcnhU2MVvq34rPwTG3ESZeMAumKDvGVEX9tQnTPTrJEspvAUuspT21EKTSBYSXXYMmwYLvVotPjpMjuX9nHiKk6hnT7xqncNNkD8Gsj4tuFf6Ms6dSG561E44TA6YvhjqTcus4GbQ9R5Dn37mWH4bEEdabNMCzG7B175PHJC6x9dcVUbtDnmhnhDe2XuRhsNgvabgzy5Ry9MuzgmTirLs7JUvizMg8"; + const EXISTING_OWNER: &str = "4ja2N12Zczh9K25zGFTfao6yPdTZSfA5Bw4QueSmQCYJ"; + + fn create_full_complete_asset() -> AssetCompleteDetails { + let pubkey = Pubkey::new_unique(); + let static_details = AssetStaticDetails { + pubkey: pubkey, + specification_asset_class: SpecificationAssetClass::Nft, + royalty_target_type: RoyaltyTargetType::Creators, + created_at: 12345, + edition_address: None, + }; + + let dynamic_details = AssetDynamicDetails { + pubkey: pubkey, + is_compressible: Updated::new(58, Some(UpdateVersion::Sequence(500)), false), + is_compressed: Updated::new(580, Some(UpdateVersion::Sequence(530)), true), + is_frozen: Updated::new(50, None, false), + supply: Some(Updated::new(50, None, 1)), + seq: Some(Updated::new(580, Some(UpdateVersion::Sequence(530)), 530)), + is_burnt: Updated::new(50, None, false), + was_decompressed: Some(Updated::new(50, None, false)), + onchain_data: Some(Updated::new( + 50, + Some(UpdateVersion::Sequence(530)), + "onchain_data".to_string(), + )), + creators: Updated::new( + 50, + None, + vec![Creator { + creator: pubkey, + creator_verified: true, + creator_share: 100, + }], + ), + royalty_amount: Updated::new(50, None, 100), + url: Updated::new(50, None, "url".to_string()), + chain_mutability: Some(Updated::new(50, 
None, ChainMutability::Mutable)), + lamports: Some(Updated::new(50, None, 100)), + executable: Some(Updated::new(50, Some(UpdateVersion::Sequence(531)), false)), + metadata_owner: Some(Updated::new( + 50, + Some(UpdateVersion::Sequence(533)), + "metadata_owner".to_string(), + )), + raw_name: Some(Updated::new(50, None, "raw_name".to_string())), + mpl_core_plugins: Some(Updated::new(50, None, "mpl_core_plugins".to_string())), + mpl_core_unknown_plugins: Some(Updated::new( + 50, + None, + "mpl_core_unknown_plugins".to_string(), + )), + rent_epoch: Some(Updated::new(50, Some(UpdateVersion::Sequence(533)), 100)), + num_minted: Some(Updated::new(50, None, 100)), + current_size: Some(Updated::new(50, Some(UpdateVersion::Sequence(533)), 100)), + plugins_json_version: Some(Updated::new(50, Some(UpdateVersion::Sequence(535)), 100)), + mpl_core_external_plugins: Some(Updated::new( + 50, + Some(UpdateVersion::Sequence(537)), + "mpl_core_external_plugins".to_string(), + )), + mpl_core_unknown_external_plugins: Some(Updated::new( + 50, + Some(UpdateVersion::Sequence(539)), + "mpl_core_unknown_external_plugins".to_string(), + )), + mint_extensions: Some(Updated::new(50, None, "mint_extensions".to_string())), + }; + + let authority = AssetAuthority { + pubkey: pubkey, + write_version: Some(500), + slot_updated: 5000, + authority: Pubkey::new_unique(), + }; + let owner = AssetOwner { + pubkey: pubkey, + owner_type: Updated::new(50, None, OwnerType::Single), + owner: Updated::new( + 51, + Some(UpdateVersion::Sequence(53)), + Some(Pubkey::new_unique()), + ), + delegate: Updated::new( + 56, + Some(UpdateVersion::Sequence(54)), + Some(Pubkey::new_unique()), + ), + owner_delegate_seq: Updated::new(58, None, None), + is_current_owner: Updated::new(50, None, true), + }; + + let collection = AssetCollection { + pubkey: pubkey, + collection: Updated::new(50, None, Pubkey::new_unique()), + is_collection_verified: Updated::new(50, None, true), + authority: Updated::new( + 58, + 
Some(UpdateVersion::Sequence(48)), + Some(Pubkey::new_unique()), + ), + }; + + AssetCompleteDetails { + pubkey, + static_details: Some(static_details), + dynamic_details: Some(dynamic_details), + authority: Some(authority), + owner: Some(owner), + collection: Some(collection), + } + } + + #[test] + fn test_merge_complete_details_with_no_operands_keeps_object_unchanged() { + let asset = create_full_complete_asset(); + let mut builder = FlatBufferBuilder::with_capacity(2500); + let asset_fb = asset.convert_to_fb(&mut builder); + builder.finish_minimal(asset_fb); + let origin_bytes = builder.finished_data(); + let operands = vec![]; + let key = rand::random::<[u8; 32]>(); + let result = merge_complete_details_fb_raw(&key, Some(origin_bytes), operands.into_iter()) + .expect("should return a result"); + assert_eq!(result, origin_bytes); + fb::root_as_asset_complete_details(&result).expect("should decode"); + } + + #[test] + fn test_merge_on_empty_existing_value() { + let asset = create_full_complete_asset(); + let mut builder = FlatBufferBuilder::with_capacity(2500); + let asset_fb = asset.convert_to_fb(&mut builder); + builder.finish_minimal(asset_fb); + let origin_bytes = builder.finished_data(); + let operands = vec![origin_bytes]; + let key = rand::random::<[u8; 32]>(); + let result = merge_complete_details_fb_raw(&key, None, operands.into_iter()) + .expect("should return a result"); + assert_eq!(result, origin_bytes); + fb::root_as_asset_complete_details(&result).expect("should decode"); + } + + #[test] + fn test_merge_only_dynamic_data_on_existing_data_without_dynamic_data() { + let original_asset = create_full_complete_asset(); + let mut asset = original_asset.clone(); + asset.dynamic_details = None; + let operand_asset = AssetCompleteDetails { + pubkey: original_asset.pubkey.clone(), + dynamic_details: original_asset.dynamic_details.clone(), + ..Default::default() + }; + let mut builder = FlatBufferBuilder::with_capacity(2500); + let existing_fb = 
asset.convert_to_fb(&mut builder); + builder.finish_minimal(existing_fb); + let existing_bytes = builder.finished_data().to_owned(); + builder.reset(); + let operand_asset_fb = operand_asset.convert_to_fb(&mut builder); + builder.finish_minimal(operand_asset_fb); + let operand_bytes = builder.finished_data().to_owned(); + let operands = vec![operand_bytes.as_slice()]; + let key = rand::random::<[u8; 32]>(); + builder.reset(); + let expected_fb = original_asset.convert_to_fb(&mut builder); + builder.finish_minimal(expected_fb); + let expected_bytes = builder.finished_data(); + let result = + merge_complete_details_fb_raw(&key, Some(&existing_bytes), operands.into_iter()) + .expect("should return a result"); + assert_eq!(result, expected_bytes); + let result_asset = fb::root_as_asset_complete_details(&result).expect("should decode"); + assert_eq!(AssetCompleteDetails::from(result_asset), original_asset); + } + + #[test] + fn test_merge_with_some_fields_updated_with_higher_sequences_while_others_have_lower_sequences() + { + let original_asset = create_full_complete_asset(); + let operand_asset = AssetCompleteDetails { + pubkey: original_asset.pubkey.clone(), + dynamic_details: Some(AssetDynamicDetails { + pubkey: original_asset.pubkey.clone(), + is_compressible: Updated::new(59, Some(UpdateVersion::Sequence(510)), true), + is_compressed: Updated::new(580, Some(UpdateVersion::Sequence(530)), false), // should not be updated (should keep original value) + onchain_data: Some(Updated::new( + 50, + Some(UpdateVersion::Sequence(540)), + "new_onchain_data".to_string(), + )), + ..Default::default() + }), + ..Default::default() + }; + let mut expected_asset = original_asset.clone(); + expected_asset.dynamic_details.as_mut().map(|ref mut dd| { + dd.is_compressible = Updated::new(59, Some(UpdateVersion::Sequence(510)), true) + }); + expected_asset.dynamic_details.as_mut().map(|ref mut dd| { + dd.onchain_data = Some(Updated::new( + 50, + Some(UpdateVersion::Sequence(540)), + 
"new_onchain_data".to_string(), + )) + }); + + let mut builder = FlatBufferBuilder::with_capacity(2500); + let existing_fb = original_asset.convert_to_fb(&mut builder); + builder.finish_minimal(existing_fb); + let existing_bytes = builder.finished_data().to_owned(); + builder.reset(); + let operand_asset_fb = operand_asset.convert_to_fb(&mut builder); + builder.finish_minimal(operand_asset_fb); + let operand_bytes = builder.finished_data().to_owned(); + let operands = vec![operand_bytes.as_slice()]; + let key = rand::random::<[u8; 32]>(); + builder.reset(); + let expected_fb = expected_asset.convert_to_fb(&mut builder); + builder.finish_minimal(expected_fb); + let expected_bytes = builder.finished_data(); + let result = + merge_complete_details_fb_raw(&key, Some(&existing_bytes), operands.into_iter()) + .expect("should return a result"); + let result_asset = fb::root_as_asset_complete_details(&result).expect("should decode"); + assert_eq!(AssetCompleteDetails::from(result_asset), expected_asset); + assert_eq!(result, expected_bytes); + } + + #[test] + fn test_verify_backward_compatibility_decoding() { + let data_bytes = solana_sdk::bs58::decode(TEST_DATA).into_vec().unwrap(); + + let asset; + unsafe { + asset = crate::asset_generated::asset::root_as_asset_complete_details_unchecked( + data_bytes.as_slice(), + ); + } + let asset_mapped = AssetCompleteDetails::from(asset); + println!("STATIC: {:#?}", asset.static_details().is_some()); + println!("DYNAMIC: {:#?}", asset.dynamic_details().is_some()); + println!("OWNER: {:#?}", asset.owner().is_some()); + println!("AUTHORITY: {:#?}", asset.authority().is_some()); + println!("COLLECTION: {:#?}", asset.collection().is_some()); + println!("SERIALIZED: {:#?}", asset_mapped); + } + + #[test] + fn test_merge_with_same_pubkey_higher_write_version() { + let data_bytes = solana_sdk::bs58::decode(TEST_DATA).into_vec().unwrap(); + let new_owner = Pubkey::from_str("2jL7yFGXkKE9oi1xHsA45UzV5491PD55AugGJiTbUr9m").unwrap(); + let 
owner = AssetOwner { + pubkey: Pubkey::from_str("DvpMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ").unwrap(), + // a new owner with smaller slot and higher write version + owner: Updated { + value: Some(new_owner.clone()), + slot_updated: 1, + update_version: Some(UpdateVersion::WriteVersion(388329656)), + }, + is_current_owner: Updated { + value: true, + slot_updated: 1, + update_version: Some(UpdateVersion::WriteVersion(388329656)), + }, + ..Default::default() + }; + let mut builder = FlatBufferBuilder::new(); + let asset_complete_details = owner.convert_to_fb(&mut builder); + builder.finish_minimal(asset_complete_details); + let operand_bytes = builder.finished_data(); + + let merge_result = merge_complete_details_fb_simple_raw( + &[], + Some(&data_bytes.as_slice()), + vec![operand_bytes].into_iter(), + ) + .expect("expected merge to return some value"); + + let asset; + unsafe { + asset = crate::asset_generated::asset::root_as_asset_complete_details_unchecked( + merge_result.as_slice(), + ); + } + assert!(asset.other_known_owners().is_none()); + let asset_mapped = AssetCompleteDetails::from(asset); + assert_eq!(asset_mapped.owner.unwrap().owner.value.unwrap(), new_owner); + } + + #[test] + fn test_merge_with_same_pubkey_higher_slot_smaller_write_version() { + let data_bytes = solana_sdk::bs58::decode(TEST_DATA).into_vec().unwrap(); + let new_owner = Pubkey::from_str("2jL7yFGXkKE9oi1xHsA45UzV5491PD55AugGJiTbUr9m").unwrap(); + let owner = AssetOwner { + pubkey: Pubkey::from_str("DvpMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ").unwrap(), + // a new owner with higher slot and smaller write version + owner: Updated { + value: Some(new_owner.clone()), + slot_updated: u64::MAX, + update_version: Some(UpdateVersion::WriteVersion(388329654)), + }, + is_current_owner: Updated { + value: true, + slot_updated: u64::MAX, + update_version: Some(UpdateVersion::WriteVersion(388329654)), + }, + ..Default::default() + }; + let mut builder = FlatBufferBuilder::new(); + let 
asset_complete_details = owner.convert_to_fb(&mut builder); + builder.finish_minimal(asset_complete_details); + let operand_bytes = builder.finished_data(); + + let merge_result = merge_complete_details_fb_simple_raw( + &[], + Some(&data_bytes.as_slice()), + vec![operand_bytes].into_iter(), + ) + .expect("expected merge to return some value"); + + let asset; + unsafe { + asset = crate::asset_generated::asset::root_as_asset_complete_details_unchecked( + merge_result.as_slice(), + ); + } + assert!(asset.other_known_owners().is_none()); + let asset_mapped = AssetCompleteDetails::from(asset); + assert_eq!( + asset_mapped.owner.as_ref().unwrap().owner.value.unwrap(), + Pubkey::from_str(EXISTING_OWNER).unwrap() + ); + // This is the case when the `is_current_owner` flag was never set and now it's updated by some old update. + // This update shouldn't have happened, as usually the versioning of owner, delegate and is_current_owner is done together. + // For the case of empty data after the migration this is acceptable imho.
+ assert!(asset_mapped.owner.unwrap().is_current_owner.value); + } + + #[test] + fn test_merge_with_different_pubkeys_same_slot_3_owners_changed() { + // The asset is transferred from owner A to owner B and then to owner C + // This means the following updates will happen: owner A(is current owner = false), owner B(is current owner = true), owner B(is current owner = false), owner C(is current owner = true) + // the final state should be owner C as the current owner and owner A and B in the other known owners + // this should happen without any regards to the order of updates + let original_data_bytes = solana_sdk::bs58::decode(TEST_DATA).into_vec().unwrap(); + let owner_a = Pubkey::from_str(EXISTING_OWNER).unwrap(); + let _owner_a_token_account_pubkey = + Pubkey::from_str("DvpMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ").unwrap(); + + let owner_b = Pubkey::from_str("2jL7yFGXkKE9oi1xHsA45UzV5491PD55AugGJiTbUr9m").unwrap(); + let owner_b_token_account_pubkey = Pubkey::new_unique(); + + let owner_c = Pubkey::from_str("9Rfs2otkZpsLPomKUGku7DaFv9YvtkV9a87nqTUgMBhC").unwrap(); + let owner_c_token_account_pubkey = Pubkey::new_unique(); + + let slot = 26979338; + + let mut builder = FlatBufferBuilder::new(); + + let owner_a_not_an_owner_data = AssetOwner { + pubkey: Pubkey::from_str("DvpMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ").unwrap(), + owner: Updated { + value: Some(owner_a.clone()), + slot_updated: slot, + update_version: Some(UpdateVersion::WriteVersion(388329657)), + }, + is_current_owner: Updated { + value: false, + slot_updated: slot, + update_version: Some(UpdateVersion::WriteVersion(388329657)), + }, + ..Default::default() + } + .convert_to_fb(&mut builder); + builder.finish_minimal(owner_a_not_an_owner_data); + let owner_a_not_an_owner_data = builder.finished_data().to_vec(); + builder.reset(); + + let owner_b_is_owner_data = AssetOwner { + pubkey: owner_b_token_account_pubkey, + owner: Updated { + value: Some(owner_b.clone()), + slot_updated: slot, + 
update_version: Some(UpdateVersion::WriteVersion(10)), + }, + is_current_owner: Updated { + value: true, + slot_updated: slot, + update_version: Some(UpdateVersion::WriteVersion(10)), + }, + ..Default::default() + } + .convert_to_fb(&mut builder); + builder.finish_minimal(owner_b_is_owner_data); + let owner_b_is_owner_data = builder.finished_data().to_vec(); + builder.reset(); + + let owner_b_not_owner_data = AssetOwner { + pubkey: owner_b_token_account_pubkey, + owner: Updated { + value: Some(owner_b.clone()), + slot_updated: slot, + update_version: Some(UpdateVersion::WriteVersion(11)), + }, + is_current_owner: Updated { + value: false, + slot_updated: slot, + update_version: Some(UpdateVersion::WriteVersion(11)), + }, + ..Default::default() + } + .convert_to_fb(&mut builder); + builder.finish_minimal(owner_b_not_owner_data); + let owner_b_not_owner_data = builder.finished_data().to_vec(); + builder.reset(); + + let owner_c_is_owner_data = AssetOwner { + pubkey: owner_c_token_account_pubkey, + owner: Updated { + value: Some(owner_c.clone()), + slot_updated: slot, + update_version: Some(UpdateVersion::WriteVersion(12)), + }, + is_current_owner: Updated { + value: true, + slot_updated: slot, + update_version: Some(UpdateVersion::WriteVersion(12)), + }, + ..Default::default() + } + .convert_to_fb(&mut builder); + builder.finish_minimal(owner_c_is_owner_data); + let owner_c_is_owner_data = builder.finished_data().to_vec(); + builder.reset(); + + // collect all the possible combinations of the updates, as the order of the updates should not matter + // first using a single call with multiple operands, then using multiple calls with a single operand + // all the combinations should result in the same final bytes + + // Collect all the updates into a vector + let updates = vec![ + ("A", owner_a_not_an_owner_data.as_slice().clone()), + ("B1", owner_b_is_owner_data.as_slice().clone()), + ("B2", owner_b_not_owner_data.as_slice().clone()), + ("C", 
owner_c_is_owner_data.as_slice().clone()), + ]; + + // Generate all permutations of the updates + let permutations = updates.iter().permutations(updates.len()); + let mut expected_result: Option> = None; + + let merge_result = merge_complete_details_fb_simple_raw( + &[], + Some(&original_data_bytes.as_slice()), + vec![ + owner_a_not_an_owner_data.as_slice().clone(), + owner_b_is_owner_data.as_slice().clone(), + owner_b_not_owner_data.as_slice().clone(), + owner_c_is_owner_data.as_slice().clone(), + ] + .into_iter(), //perm.into_iter().map(|d| *d), + ) + .expect("expected merge to return some value"); + expected_result = Some(merge_result); + + for perm in permutations { + let merge_result = merge_complete_details_fb_simple_raw( + &[], + Some(&original_data_bytes.as_slice()), + perm.clone().into_iter().map(|(_, d)| *d), + ) + .expect("expected merge to return some value"); + let perm_name = perm.iter().map(|(k, _)| k).join(", "); + let asset; + unsafe { + asset = crate::asset_generated::asset::root_as_asset_complete_details_unchecked( + merge_result.as_slice(), + ); + } + let asset_mapped = AssetCompleteDetails::from(asset); + assert!( + asset_mapped.owner.as_ref().unwrap().is_current_owner.value, + "owner should be current for one permutation {}", + perm_name + ); + assert_eq!( + asset_mapped.owner.as_ref().unwrap().owner.value.unwrap(), + owner_c, + "Owner should be C for one permutation {}", + perm_name, + ); + assert!(asset.other_known_owners().is_some()); + assert_eq!(asset.other_known_owners().unwrap().len(), 2); + assert_eq!( + asset + .other_known_owners() + .unwrap() + .get(0) + .is_current_owner() + .unwrap() + .value(), + false + ); + assert_eq!( + asset + .other_known_owners() + .unwrap() + .get(1) + .is_current_owner() + .unwrap() + .value(), + false + ); + if let Some(expected) = &expected_result { + assert_eq!( + &merge_result, expected, + "Merge result differs for one permutation {}", + perm_name, + ); + } + } + } +} diff --git 
a/rocks-db/src/asset_client.rs b/rocks-db/src/asset_client.rs index 0411a916c..b2793e983 100644 --- a/rocks-db/src/asset_client.rs +++ b/rocks-db/src/asset_client.rs @@ -3,14 +3,16 @@ use solana_sdk::pubkey::Pubkey; use std::sync::atomic::Ordering; use crate::asset::{ - AssetSelectedMaps, AssetsUpdateIdx, FungibleAssetsUpdateIdx, SlotAssetIdx, SlotAssetIdxKey, + AssetCollection, AssetCompleteDetails, AssetSelectedMaps, AssetsUpdateIdx, SlotAssetIdx, + SlotAssetIdxKey,FungibleAssetsUpdateIdx, }; -use crate::column::Column; +use crate::asset_generated::asset as fb; +use crate::column::{Column, TypedColumn}; use crate::errors::StorageError; use crate::key_encoders::encode_u64x2_pubkey; -use crate::{Result, Storage}; +use crate::{Result, Storage, BATCH_GET_ACTION, ROCKS_COMPONENT}; use entities::api_req_params::Options; -use entities::enums::{AssetType, TokenMetadataEdition}; +use entities::enums::{AssetType, SpecificationAssetClass, TokenMetadataEdition}; use entities::models::{EditionData, PubkeyWithSlot}; use futures_util::FutureExt; use std::collections::HashMap; @@ -148,11 +150,8 @@ impl Storage { owner_address: &Option, options: &Options, ) -> Result { - let assets_dynamic_fut = self.asset_dynamic_data.batch_get(asset_ids.clone()); - let assets_static_fut = self.asset_static_data.batch_get(asset_ids.clone()); - let assets_authority_fut = self.asset_authority_data.batch_get(asset_ids.clone()); - let assets_collection_fut = self.asset_collection_data.batch_get(asset_ids.clone()); - let assets_owner_fut = self.asset_owner_data.batch_get(asset_ids.clone()); + let assets_with_collections_and_urls_fut = + self.get_assets_with_collections_and_urls(asset_ids.clone()); let assets_leaf_fut = self.asset_leaf_data.batch_get(asset_ids.clone()); let token_accounts_fut = if let Some(owner_address) = owner_address { self.get_raw_token_accounts(Some(*owner_address), None, None, None, None, None, true) @@ -162,80 +161,73 @@ impl Storage { }; let spl_mints_fut = 
self.spl_mints.batch_get(asset_ids.clone()); - let mut assets_dynamic = to_map!(assets_dynamic_fut.await); - let mut urls: HashMap<_, _> = assets_dynamic - .iter() - .map(|(key, asset)| (key.to_string(), asset.url.value.clone())) - .collect(); + let inscriptions_fut = if options.show_inscription { + self.inscriptions.batch_get(asset_ids.clone()).boxed() + } else { + async { Ok(Vec::new()) }.boxed() + }; + let (mut assets_data, assets_collection_pks, mut urls) = + assets_with_collections_and_urls_fut.await?; + let mut mpl_core_collections = HashMap::new(); + // todo: consider async/future here, but not likely as the very next call depends on urls from this one + if !assets_collection_pks.is_empty() { + let assets_collection_pks = assets_collection_pks.into_iter().collect::>(); + let start_time = chrono::Utc::now(); + let collection_d = self.db.batched_multi_get_cf( + &self.asset_data.handle(), + assets_collection_pks.clone(), + false, + ); + for asset in collection_d { + let asset = asset?; + if let Some(asset) = asset { + let asset = fb::root_as_asset_complete_details(asset.as_ref()) + .map_err(|e| StorageError::Common(e.to_string()))?; + let key = + Pubkey::new_from_array(asset.pubkey().unwrap().bytes().try_into().unwrap()); + if options.show_collection_metadata { + asset + .dynamic_details() + .and_then(|d| d.url()) + .and_then(|u| u.value()) + .map(|u| urls.insert(key, u.to_string())); + assets_data.insert(key, asset.into()); + } + if let Some(collection) = asset.collection() { + mpl_core_collections.insert(key, AssetCollection::from(collection)); + } + } + } + self.red_metrics.observe_request( + ROCKS_COMPONENT, + BATCH_GET_ACTION, + "get_asset_collection", + start_time, + ); + } + let offchain_data_fut = self .asset_offchain_data .batch_get(urls.clone().into_values().collect::>()); - let ( - assets_static, - assets_authority, - assets_collection, - assets_owner, - assets_leaf, - offchain_data, - token_accounts, - spl_mints, - ) = tokio::join!( - 
assets_static_fut, - assets_authority_fut, - assets_collection_fut, - assets_owner_fut, + let (assets_leaf, offchain_data, token_accounts, spl_mints) = tokio::join!( assets_leaf_fut, offchain_data_fut, token_accounts_fut, spl_mints_fut ); - let mut offchain_data = offchain_data + let offchain_data = offchain_data .map_err(|e| StorageError::Common(e.to_string()))? .into_iter() - .filter_map(|asset| asset.map(|a| (a.url.clone(), a))) + .filter_map(|asset| { + asset + .filter(|a| !a.metadata.is_empty()) + .map(|a| (a.url.clone(), a)) + }) .collect::>(); - let assets_static = to_map!(assets_static); - let assets_collection_pks = assets_collection - .as_ref() - .map_err(|e| StorageError::Common(e.to_string()))? - .iter() - .flat_map(|c| c.as_ref().map(|c| c.collection.value)) - .collect::>(); - if options.show_collection_metadata { - let collection_dynamic_data = to_map!( - self.asset_dynamic_data - .batch_get(assets_collection_pks.clone()) - .await - ); - assets_dynamic.extend(collection_dynamic_data.clone()); - let collection_urls: HashMap<_, _> = collection_dynamic_data - .iter() - .map(|(key, asset)| (key.to_string(), asset.url.value.clone())) - .collect(); - urls.extend(collection_urls.clone()); - let collection_offchain_data = self - .asset_offchain_data - .batch_get(collection_urls.clone().into_values().collect::>()) - .await - .map_err(|e| StorageError::Common(e.to_string()))? 
- .into_iter() - .filter_map(|asset| asset.map(|a| (a.url.clone(), a))) - .collect::>(); - offchain_data.extend(collection_offchain_data) - }; - let mpl_core_collections = to_map!( - self.asset_collection_data - .batch_get(assets_collection_pks) - .await - ); - let mut assets_collection = to_map!(assets_collection); - assets_collection.extend(mpl_core_collections); let (inscriptions, inscriptions_data) = if options.show_inscription { - let inscriptions = self - .inscriptions - .batch_get(asset_ids.clone()) + let inscriptions = inscriptions_fut .await .map_err(|e| StorageError::Common(e.to_string()))? .into_iter() @@ -256,27 +248,49 @@ impl Storage { (HashMap::new(), HashMap::new()) }; let token_accounts = token_accounts.map_err(|e| StorageError::Common(e.to_string()))?; + let spl_mints = to_map!(spl_mints); + // As we can not rely on the asset class from the database, we need to check the mint + assets_data + .iter_mut() + .filter(|(_, asset)| { + asset.static_details.as_ref().is_some_and(|sd| { + sd.specification_asset_class == SpecificationAssetClass::FungibleAsset + || sd.specification_asset_class == SpecificationAssetClass::FungibleToken + }) + }) + .for_each(|(_, ref mut asset)| { + if spl_mints + .get(&asset.pubkey) + .map(|spl_mint| spl_mint.is_nft()) + .unwrap_or(false) + { + asset + .static_details + .as_mut() + .map(|sd| sd.specification_asset_class = SpecificationAssetClass::Nft); + } + }); Ok(AssetSelectedMaps { editions: self .get_editions( - assets_static + assets_data .values() - .filter_map(|s| s.edition_address) + .filter_map(|a: &crate::asset::AssetCompleteDetails| { + a.static_details.as_ref().map(|s| s.edition_address) + }) + .flatten() .collect::>(), ) .await?, - assets_static, - assets_dynamic, - assets_authority: to_map!(assets_authority), - assets_collection, - assets_owner: to_map!(assets_owner), + mpl_core_collections, + asset_complete_details: assets_data, assets_leaf: to_map!(assets_leaf), offchain_data, urls, inscriptions, 
inscriptions_data, - spl_mints: to_map!(spl_mints), + spl_mints, token_accounts: token_accounts .into_iter() .flat_map(|ta| ta.map(|ta| (ta.mint, ta))) @@ -284,6 +298,7 @@ impl Storage { }) } + // todo: review this method as it has 2 more awaits async fn get_editions( &self, edition_keys: Vec, @@ -346,4 +361,38 @@ impl Storage { .map(|edition| (edition.key, edition)) .collect::>()) } + + pub fn get_complete_asset_details( + &self, + pubkey: Pubkey, + ) -> Result> { + let data = self.db.get_pinned_cf( + &self.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + pubkey, + )?; + match data { + Some(data) => { + let asset = fb::root_as_asset_complete_details(&data) + .map_err(|e| StorageError::Common(e.to_string()))?; + Ok(Some(AssetCompleteDetails::from(asset))) + } + _ => Ok(None), + } + } + + #[cfg(test)] + pub fn put_complete_asset_details_batch( + &self, + assets: HashMap, + ) -> Result<()> { + let mut batch = rocksdb::WriteBatchWithTransaction::::default(); + for (pubkey, asset) in assets { + batch.put_cf( + &self.asset_data.handle(), + pubkey, + asset.convert_to_fb_bytes(), + ); + } + self.db.write(batch).map_err(StorageError::RocksDb) + } } diff --git a/rocks-db/src/asset_generated.rs b/rocks-db/src/asset_generated.rs new file mode 100644 index 000000000..1c1aab934 --- /dev/null +++ b/rocks-db/src/asset_generated.rs @@ -0,0 +1,4739 @@ +// automatically generated by the FlatBuffers compiler, do not modify + +// @generated + +use core::cmp::Ordering; +use core::mem; + +extern crate flatbuffers; +use self::flatbuffers::{EndianScalar, Follow}; + +#[allow(unused_imports, dead_code)] +pub mod asset { + + use core::cmp::Ordering; + use core::mem; + + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; + + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." 
+ )] + pub const ENUM_MIN_SPECIFICATION_ASSET_CLASS: i8 = 0; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MAX_SPECIFICATION_ASSET_CLASS: i8 = 11; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + #[allow(non_camel_case_types)] + pub const ENUM_VALUES_SPECIFICATION_ASSET_CLASS: [SpecificationAssetClass; 12] = [ + SpecificationAssetClass::Unknown, + SpecificationAssetClass::FungibleToken, + SpecificationAssetClass::FungibleAsset, + SpecificationAssetClass::Nft, + SpecificationAssetClass::PrintableNft, + SpecificationAssetClass::ProgrammableNft, + SpecificationAssetClass::Print, + SpecificationAssetClass::TransferRestrictedNft, + SpecificationAssetClass::NonTransferableNft, + SpecificationAssetClass::IdentityNft, + SpecificationAssetClass::MplCoreAsset, + SpecificationAssetClass::MplCoreCollection, + ]; + + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] + #[repr(transparent)] + pub struct SpecificationAssetClass(pub i8); + #[allow(non_upper_case_globals)] + impl SpecificationAssetClass { + pub const Unknown: Self = Self(0); + pub const FungibleToken: Self = Self(1); + pub const FungibleAsset: Self = Self(2); + pub const Nft: Self = Self(3); + pub const PrintableNft: Self = Self(4); + pub const ProgrammableNft: Self = Self(5); + pub const Print: Self = Self(6); + pub const TransferRestrictedNft: Self = Self(7); + pub const NonTransferableNft: Self = Self(8); + pub const IdentityNft: Self = Self(9); + pub const MplCoreAsset: Self = Self(10); + pub const MplCoreCollection: Self = Self(11); + + pub const ENUM_MIN: i8 = 0; + pub const ENUM_MAX: i8 = 11; + pub const ENUM_VALUES: &'static [Self] = &[ + Self::Unknown, + Self::FungibleToken, + Self::FungibleAsset, + Self::Nft, + Self::PrintableNft, + Self::ProgrammableNft, + Self::Print, + Self::TransferRestrictedNft, + 
Self::NonTransferableNft, + Self::IdentityNft, + Self::MplCoreAsset, + Self::MplCoreCollection, + ]; + /// Returns the variant's name or "" if unknown. + pub fn variant_name(self) -> Option<&'static str> { + match self { + Self::Unknown => Some("Unknown"), + Self::FungibleToken => Some("FungibleToken"), + Self::FungibleAsset => Some("FungibleAsset"), + Self::Nft => Some("Nft"), + Self::PrintableNft => Some("PrintableNft"), + Self::ProgrammableNft => Some("ProgrammableNft"), + Self::Print => Some("Print"), + Self::TransferRestrictedNft => Some("TransferRestrictedNft"), + Self::NonTransferableNft => Some("NonTransferableNft"), + Self::IdentityNft => Some("IdentityNft"), + Self::MplCoreAsset => Some("MplCoreAsset"), + Self::MplCoreCollection => Some("MplCoreCollection"), + _ => None, + } + } + } + impl core::fmt::Debug for SpecificationAssetClass { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if let Some(name) = self.variant_name() { + f.write_str(name) + } else { + f.write_fmt(format_args!("", self.0)) + } + } + } + impl<'a> flatbuffers::Follow<'a> for SpecificationAssetClass { + type Inner = Self; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + let b = flatbuffers::read_scalar_at::(buf, loc); + Self(b) + } + } + + impl flatbuffers::Push for SpecificationAssetClass { + type Output = SpecificationAssetClass; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + flatbuffers::emplace_scalar::(dst, self.0); + } + } + + impl flatbuffers::EndianScalar for SpecificationAssetClass { + type Scalar = i8; + #[inline] + fn to_little_endian(self) -> i8 { + self.0.to_le() + } + #[inline] + #[allow(clippy::wrong_self_convention)] + fn from_little_endian(v: i8) -> Self { + let b = i8::from_le(v); + Self(b) + } + } + + impl<'a> flatbuffers::Verifiable for SpecificationAssetClass { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> 
{ + use self::flatbuffers::Verifiable; + i8::run_verifier(v, pos) + } + } + + impl flatbuffers::SimpleToVerifyInSlice for SpecificationAssetClass {} + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MIN_ROYALTY_TARGET_TYPE: i8 = 0; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MAX_ROYALTY_TARGET_TYPE: i8 = 3; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + #[allow(non_camel_case_types)] + pub const ENUM_VALUES_ROYALTY_TARGET_TYPE: [RoyaltyTargetType; 4] = [ + RoyaltyTargetType::Unknown, + RoyaltyTargetType::Creators, + RoyaltyTargetType::Fanout, + RoyaltyTargetType::Single, + ]; + + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] + #[repr(transparent)] + pub struct RoyaltyTargetType(pub i8); + #[allow(non_upper_case_globals)] + impl RoyaltyTargetType { + pub const Unknown: Self = Self(0); + pub const Creators: Self = Self(1); + pub const Fanout: Self = Self(2); + pub const Single: Self = Self(3); + + pub const ENUM_MIN: i8 = 0; + pub const ENUM_MAX: i8 = 3; + pub const ENUM_VALUES: &'static [Self] = + &[Self::Unknown, Self::Creators, Self::Fanout, Self::Single]; + /// Returns the variant's name or "" if unknown. 
+ pub fn variant_name(self) -> Option<&'static str> { + match self { + Self::Unknown => Some("Unknown"), + Self::Creators => Some("Creators"), + Self::Fanout => Some("Fanout"), + Self::Single => Some("Single"), + _ => None, + } + } + } + impl core::fmt::Debug for RoyaltyTargetType { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if let Some(name) = self.variant_name() { + f.write_str(name) + } else { + f.write_fmt(format_args!("", self.0)) + } + } + } + impl<'a> flatbuffers::Follow<'a> for RoyaltyTargetType { + type Inner = Self; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + let b = flatbuffers::read_scalar_at::(buf, loc); + Self(b) + } + } + + impl flatbuffers::Push for RoyaltyTargetType { + type Output = RoyaltyTargetType; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + flatbuffers::emplace_scalar::(dst, self.0); + } + } + + impl flatbuffers::EndianScalar for RoyaltyTargetType { + type Scalar = i8; + #[inline] + fn to_little_endian(self) -> i8 { + self.0.to_le() + } + #[inline] + #[allow(clippy::wrong_self_convention)] + fn from_little_endian(v: i8) -> Self { + let b = i8::from_le(v); + Self(b) + } + } + + impl<'a> flatbuffers::Verifiable for RoyaltyTargetType { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + i8::run_verifier(v, pos) + } + } + + impl flatbuffers::SimpleToVerifyInSlice for RoyaltyTargetType {} + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MIN_OWNER_TYPE: i8 = 0; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MAX_OWNER_TYPE: i8 = 2; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." 
+ )] + #[allow(non_camel_case_types)] + pub const ENUM_VALUES_OWNER_TYPE: [OwnerType; 3] = + [OwnerType::Unknown, OwnerType::Token, OwnerType::Single]; + + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] + #[repr(transparent)] + pub struct OwnerType(pub i8); + #[allow(non_upper_case_globals)] + impl OwnerType { + pub const Unknown: Self = Self(0); + pub const Token: Self = Self(1); + pub const Single: Self = Self(2); + + pub const ENUM_MIN: i8 = 0; + pub const ENUM_MAX: i8 = 2; + pub const ENUM_VALUES: &'static [Self] = &[Self::Unknown, Self::Token, Self::Single]; + /// Returns the variant's name or "" if unknown. + pub fn variant_name(self) -> Option<&'static str> { + match self { + Self::Unknown => Some("Unknown"), + Self::Token => Some("Token"), + Self::Single => Some("Single"), + _ => None, + } + } + } + impl core::fmt::Debug for OwnerType { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if let Some(name) = self.variant_name() { + f.write_str(name) + } else { + f.write_fmt(format_args!("", self.0)) + } + } + } + impl<'a> flatbuffers::Follow<'a> for OwnerType { + type Inner = Self; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + let b = flatbuffers::read_scalar_at::(buf, loc); + Self(b) + } + } + + impl flatbuffers::Push for OwnerType { + type Output = OwnerType; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + flatbuffers::emplace_scalar::(dst, self.0); + } + } + + impl flatbuffers::EndianScalar for OwnerType { + type Scalar = i8; + #[inline] + fn to_little_endian(self) -> i8 { + self.0.to_le() + } + #[inline] + #[allow(clippy::wrong_self_convention)] + fn from_little_endian(v: i8) -> Self { + let b = i8::from_le(v); + Self(b) + } + } + + impl<'a> flatbuffers::Verifiable for OwnerType { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + 
i8::run_verifier(v, pos) + } + } + + impl flatbuffers::SimpleToVerifyInSlice for OwnerType {} + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MIN_CHAIN_MUTABILITY: i8 = 0; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MAX_CHAIN_MUTABILITY: i8 = 1; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + #[allow(non_camel_case_types)] + pub const ENUM_VALUES_CHAIN_MUTABILITY: [ChainMutability; 2] = + [ChainMutability::Immutable, ChainMutability::Mutable]; + + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] + #[repr(transparent)] + pub struct ChainMutability(pub i8); + #[allow(non_upper_case_globals)] + impl ChainMutability { + pub const Immutable: Self = Self(0); + pub const Mutable: Self = Self(1); + + pub const ENUM_MIN: i8 = 0; + pub const ENUM_MAX: i8 = 1; + pub const ENUM_VALUES: &'static [Self] = &[Self::Immutable, Self::Mutable]; + /// Returns the variant's name or "" if unknown. 
+ pub fn variant_name(self) -> Option<&'static str> { + match self { + Self::Immutable => Some("Immutable"), + Self::Mutable => Some("Mutable"), + _ => None, + } + } + } + impl core::fmt::Debug for ChainMutability { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if let Some(name) = self.variant_name() { + f.write_str(name) + } else { + f.write_fmt(format_args!("", self.0)) + } + } + } + impl<'a> flatbuffers::Follow<'a> for ChainMutability { + type Inner = Self; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + let b = flatbuffers::read_scalar_at::(buf, loc); + Self(b) + } + } + + impl flatbuffers::Push for ChainMutability { + type Output = ChainMutability; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + flatbuffers::emplace_scalar::(dst, self.0); + } + } + + impl flatbuffers::EndianScalar for ChainMutability { + type Scalar = i8; + #[inline] + fn to_little_endian(self) -> i8 { + self.0.to_le() + } + #[inline] + #[allow(clippy::wrong_self_convention)] + fn from_little_endian(v: i8) -> Self { + let b = i8::from_le(v); + Self(b) + } + } + + impl<'a> flatbuffers::Verifiable for ChainMutability { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + i8::run_verifier(v, pos) + } + } + + impl flatbuffers::SimpleToVerifyInSlice for ChainMutability {} + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MIN_UPDATE_VERSION_TYPE: i8 = 0; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MAX_UPDATE_VERSION_TYPE: i8 = 2; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." 
+ )] + #[allow(non_camel_case_types)] + pub const ENUM_VALUES_UPDATE_VERSION_TYPE: [UpdateVersionType; 3] = [ + UpdateVersionType::None, + UpdateVersionType::Sequence, + UpdateVersionType::WriteVersion, + ]; + + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] + #[repr(transparent)] + pub struct UpdateVersionType(pub i8); + #[allow(non_upper_case_globals)] + impl UpdateVersionType { + pub const None: Self = Self(0); + pub const Sequence: Self = Self(1); + pub const WriteVersion: Self = Self(2); + + pub const ENUM_MIN: i8 = 0; + pub const ENUM_MAX: i8 = 2; + pub const ENUM_VALUES: &'static [Self] = &[Self::None, Self::Sequence, Self::WriteVersion]; + /// Returns the variant's name or "" if unknown. + pub fn variant_name(self) -> Option<&'static str> { + match self { + Self::None => Some("None"), + Self::Sequence => Some("Sequence"), + Self::WriteVersion => Some("WriteVersion"), + _ => None, + } + } + } + impl core::fmt::Debug for UpdateVersionType { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if let Some(name) = self.variant_name() { + f.write_str(name) + } else { + f.write_fmt(format_args!("", self.0)) + } + } + } + impl<'a> flatbuffers::Follow<'a> for UpdateVersionType { + type Inner = Self; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + let b = flatbuffers::read_scalar_at::(buf, loc); + Self(b) + } + } + + impl flatbuffers::Push for UpdateVersionType { + type Output = UpdateVersionType; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + flatbuffers::emplace_scalar::(dst, self.0); + } + } + + impl flatbuffers::EndianScalar for UpdateVersionType { + type Scalar = i8; + #[inline] + fn to_little_endian(self) -> i8 { + self.0.to_le() + } + #[inline] + #[allow(clippy::wrong_self_convention)] + fn from_little_endian(v: i8) -> Self { + let b = i8::from_le(v); + Self(b) + } + } + + impl<'a> flatbuffers::Verifiable for UpdateVersionType { + #[inline] + fn run_verifier( + 
v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + i8::run_verifier(v, pos) + } + } + + impl flatbuffers::SimpleToVerifyInSlice for UpdateVersionType {} + pub enum UpdateVersionOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct UpdateVersion<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for UpdateVersion<'a> { + type Inner = UpdateVersion<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdateVersion<'a> { + pub const VT_VERSION_TYPE: flatbuffers::VOffsetT = 4; + pub const VT_VERSION_VALUE: flatbuffers::VOffsetT = 6; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdateVersion { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdateVersionArgs, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdateVersionBuilder::new(_fbb); + builder.add_version_value(args.version_value); + builder.add_version_type(args.version_type); + builder.finish() + } + + #[inline] + pub fn version_type(&self) -> UpdateVersionType { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::( + UpdateVersion::VT_VERSION_TYPE, + Some(UpdateVersionType::None), + ) + .unwrap() + } + } + #[inline] + pub fn version_value(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdateVersion::VT_VERSION_VALUE, Some(0)) + .unwrap() + } + } + } + + impl flatbuffers::Verifiable for UpdateVersion<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: 
usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("version_type", Self::VT_VERSION_TYPE, false)? + .visit_field::("version_value", Self::VT_VERSION_VALUE, false)? + .finish(); + Ok(()) + } + } + pub struct UpdateVersionArgs { + pub version_type: UpdateVersionType, + pub version_value: u64, + } + impl<'a> Default for UpdateVersionArgs { + #[inline] + fn default() -> Self { + UpdateVersionArgs { + version_type: UpdateVersionType::None, + version_value: 0, + } + } + } + + pub struct UpdateVersionBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdateVersionBuilder<'a, 'b, A> { + #[inline] + pub fn add_version_type(&mut self, version_type: UpdateVersionType) { + self.fbb_.push_slot::( + UpdateVersion::VT_VERSION_TYPE, + version_type, + UpdateVersionType::None, + ); + } + #[inline] + pub fn add_version_value(&mut self, version_value: u64) { + self.fbb_ + .push_slot::(UpdateVersion::VT_VERSION_VALUE, version_value, 0); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdateVersionBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + UpdateVersionBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for UpdateVersion<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdateVersion"); + ds.field("version_type", &self.version_type()); + ds.field("version_value", &self.version_value()); + ds.finish() + } + } + pub enum UpdatedBoolOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct UpdatedBool<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> 
flatbuffers::Follow<'a> for UpdatedBool<'a> { + type Inner = UpdatedBool<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdatedBool<'a> { + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 4; + pub const VT_UPDATE_VERSION: flatbuffers::VOffsetT = 6; + pub const VT_VALUE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdatedBool { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdatedBoolArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdatedBoolBuilder::new(_fbb); + builder.add_slot_updated(args.slot_updated); + if let Some(x) = args.update_version { + builder.add_update_version(x); + } + builder.add_value(args.value); + builder.finish() + } + + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedBool::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn update_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + UpdatedBool::VT_UPDATE_VERSION, + None, + ) + } + } + #[inline] + pub fn value(&self) -> bool { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedBool::VT_VALUE, Some(false)) + .unwrap() + } + } + } + + impl flatbuffers::Verifiable for UpdatedBool<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + 
v.visit_table(pos)? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::>( + "update_version", + Self::VT_UPDATE_VERSION, + false, + )? + .visit_field::("value", Self::VT_VALUE, false)? + .finish(); + Ok(()) + } + } + pub struct UpdatedBoolArgs<'a> { + pub slot_updated: u64, + pub update_version: Option>>, + pub value: bool, + } + impl<'a> Default for UpdatedBoolArgs<'a> { + #[inline] + fn default() -> Self { + UpdatedBoolArgs { + slot_updated: 0, + update_version: None, + value: false, + } + } + } + + pub struct UpdatedBoolBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdatedBoolBuilder<'a, 'b, A> { + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(UpdatedBool::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_update_version( + &mut self, + update_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + UpdatedBool::VT_UPDATE_VERSION, + update_version, + ); + } + #[inline] + pub fn add_value(&mut self, value: bool) { + self.fbb_ + .push_slot::(UpdatedBool::VT_VALUE, value, false); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdatedBoolBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + UpdatedBoolBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for UpdatedBool<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdatedBool"); + ds.field("slot_updated", &self.slot_updated()); + ds.field("update_version", &self.update_version()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum UpdatedU64Offset {} + 
#[derive(Copy, Clone, PartialEq)] + + pub struct UpdatedU64<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for UpdatedU64<'a> { + type Inner = UpdatedU64<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdatedU64<'a> { + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 4; + pub const VT_UPDATE_VERSION: flatbuffers::VOffsetT = 6; + pub const VT_VALUE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdatedU64 { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdatedU64Args<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdatedU64Builder::new(_fbb); + builder.add_value(args.value); + builder.add_slot_updated(args.slot_updated); + if let Some(x) = args.update_version { + builder.add_update_version(x); + } + builder.finish() + } + + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedU64::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn update_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + UpdatedU64::VT_UPDATE_VERSION, + None, + ) + } + } + #[inline] + pub fn value(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(UpdatedU64::VT_VALUE, Some(0)).unwrap() } + } + } + + impl flatbuffers::Verifiable for UpdatedU64<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, 
+ ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::>( + "update_version", + Self::VT_UPDATE_VERSION, + false, + )? + .visit_field::("value", Self::VT_VALUE, false)? + .finish(); + Ok(()) + } + } + pub struct UpdatedU64Args<'a> { + pub slot_updated: u64, + pub update_version: Option>>, + pub value: u64, + } + impl<'a> Default for UpdatedU64Args<'a> { + #[inline] + fn default() -> Self { + UpdatedU64Args { + slot_updated: 0, + update_version: None, + value: 0, + } + } + } + + pub struct UpdatedU64Builder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdatedU64Builder<'a, 'b, A> { + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(UpdatedU64::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_update_version( + &mut self, + update_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + UpdatedU64::VT_UPDATE_VERSION, + update_version, + ); + } + #[inline] + pub fn add_value(&mut self, value: u64) { + self.fbb_.push_slot::(UpdatedU64::VT_VALUE, value, 0); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdatedU64Builder<'a, 'b, A> { + let start = _fbb.start_table(); + UpdatedU64Builder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for UpdatedU64<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdatedU64"); + ds.field("slot_updated", &self.slot_updated()); + ds.field("update_version", &self.update_version()); + ds.field("value", 
&self.value()); + ds.finish() + } + } + pub enum UpdatedU32Offset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct UpdatedU32<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for UpdatedU32<'a> { + type Inner = UpdatedU32<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdatedU32<'a> { + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 4; + pub const VT_UPDATE_VERSION: flatbuffers::VOffsetT = 6; + pub const VT_VALUE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdatedU32 { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdatedU32Args<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdatedU32Builder::new(_fbb); + builder.add_slot_updated(args.slot_updated); + builder.add_value(args.value); + if let Some(x) = args.update_version { + builder.add_update_version(x); + } + builder.finish() + } + + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedU32::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn update_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + UpdatedU32::VT_UPDATE_VERSION, + None, + ) + } + } + #[inline] + pub fn value(&self) -> u32 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(UpdatedU32::VT_VALUE, Some(0)).unwrap() } + } + } + + impl flatbuffers::Verifiable for UpdatedU32<'_> { + 
#[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::>( + "update_version", + Self::VT_UPDATE_VERSION, + false, + )? + .visit_field::("value", Self::VT_VALUE, false)? + .finish(); + Ok(()) + } + } + pub struct UpdatedU32Args<'a> { + pub slot_updated: u64, + pub update_version: Option>>, + pub value: u32, + } + impl<'a> Default for UpdatedU32Args<'a> { + #[inline] + fn default() -> Self { + UpdatedU32Args { + slot_updated: 0, + update_version: None, + value: 0, + } + } + } + + pub struct UpdatedU32Builder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdatedU32Builder<'a, 'b, A> { + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(UpdatedU32::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_update_version( + &mut self, + update_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + UpdatedU32::VT_UPDATE_VERSION, + update_version, + ); + } + #[inline] + pub fn add_value(&mut self, value: u32) { + self.fbb_.push_slot::(UpdatedU32::VT_VALUE, value, 0); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdatedU32Builder<'a, 'b, A> { + let start = _fbb.start_table(); + UpdatedU32Builder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for UpdatedU32<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdatedU32"); + ds.field("slot_updated", &self.slot_updated()); + 
ds.field("update_version", &self.update_version()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum UpdatedStringOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct UpdatedString<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for UpdatedString<'a> { + type Inner = UpdatedString<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdatedString<'a> { + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 4; + pub const VT_UPDATE_VERSION: flatbuffers::VOffsetT = 6; + pub const VT_VALUE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdatedString { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdatedStringArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdatedStringBuilder::new(_fbb); + builder.add_slot_updated(args.slot_updated); + if let Some(x) = args.value { + builder.add_value(x); + } + if let Some(x) = args.update_version { + builder.add_update_version(x); + } + builder.finish() + } + + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedString::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn update_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + UpdatedString::VT_UPDATE_VERSION, + None, + ) + } + } + #[inline] + pub fn value(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this 
slot + unsafe { + self._tab + .get::>(UpdatedString::VT_VALUE, None) + } + } + } + + impl flatbuffers::Verifiable for UpdatedString<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::>( + "update_version", + Self::VT_UPDATE_VERSION, + false, + )? + .visit_field::>("value", Self::VT_VALUE, false)? + .finish(); + Ok(()) + } + } + pub struct UpdatedStringArgs<'a> { + pub slot_updated: u64, + pub update_version: Option>>, + pub value: Option>, + } + impl<'a> Default for UpdatedStringArgs<'a> { + #[inline] + fn default() -> Self { + UpdatedStringArgs { + slot_updated: 0, + update_version: None, + value: None, + } + } + } + + pub struct UpdatedStringBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdatedStringBuilder<'a, 'b, A> { + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(UpdatedString::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_update_version( + &mut self, + update_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + UpdatedString::VT_UPDATE_VERSION, + update_version, + ); + } + #[inline] + pub fn add_value(&mut self, value: flatbuffers::WIPOffset<&'b str>) { + self.fbb_ + .push_slot_always::>(UpdatedString::VT_VALUE, value); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdatedStringBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + UpdatedStringBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + 
impl core::fmt::Debug for UpdatedString<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdatedString"); + ds.field("slot_updated", &self.slot_updated()); + ds.field("update_version", &self.update_version()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum UpdatedPubkeyOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct UpdatedPubkey<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for UpdatedPubkey<'a> { + type Inner = UpdatedPubkey<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdatedPubkey<'a> { + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 4; + pub const VT_UPDATE_VERSION: flatbuffers::VOffsetT = 6; + pub const VT_VALUE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdatedPubkey { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdatedPubkeyArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdatedPubkeyBuilder::new(_fbb); + builder.add_slot_updated(args.slot_updated); + if let Some(x) = args.value { + builder.add_value(x); + } + if let Some(x) = args.update_version { + builder.add_update_version(x); + } + builder.finish() + } + + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedPubkey::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn update_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + 
.get::>( + UpdatedPubkey::VT_UPDATE_VERSION, + None, + ) + } + } + #[inline] + pub fn value(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + UpdatedPubkey::VT_VALUE, + None, + ) + } + } + } + + impl flatbuffers::Verifiable for UpdatedPubkey<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::>( + "update_version", + Self::VT_UPDATE_VERSION, + false, + )? + .visit_field::>>( + "value", + Self::VT_VALUE, + false, + )? + .finish(); + Ok(()) + } + } + pub struct UpdatedPubkeyArgs<'a> { + pub slot_updated: u64, + pub update_version: Option>>, + pub value: Option>>, + } + impl<'a> Default for UpdatedPubkeyArgs<'a> { + #[inline] + fn default() -> Self { + UpdatedPubkeyArgs { + slot_updated: 0, + update_version: None, + value: None, + } + } + } + + pub struct UpdatedPubkeyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdatedPubkeyBuilder<'a, 'b, A> { + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(UpdatedPubkey::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_update_version( + &mut self, + update_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + UpdatedPubkey::VT_UPDATE_VERSION, + update_version, + ); + } + #[inline] + pub fn add_value(&mut self, value: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>(UpdatedPubkey::VT_VALUE, value); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdatedPubkeyBuilder<'a, 'b, A> { + let start = 
_fbb.start_table(); + UpdatedPubkeyBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for UpdatedPubkey<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdatedPubkey"); + ds.field("slot_updated", &self.slot_updated()); + ds.field("update_version", &self.update_version()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum UpdatedOptionalPubkeyOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct UpdatedOptionalPubkey<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for UpdatedOptionalPubkey<'a> { + type Inner = UpdatedOptionalPubkey<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdatedOptionalPubkey<'a> { + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 4; + pub const VT_UPDATE_VERSION: flatbuffers::VOffsetT = 6; + pub const VT_VALUE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdatedOptionalPubkey { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdatedOptionalPubkeyArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdatedOptionalPubkeyBuilder::new(_fbb); + builder.add_slot_updated(args.slot_updated); + if let Some(x) = args.value { + builder.add_value(x); + } + if let Some(x) = args.update_version { + builder.add_update_version(x); + } + builder.finish() + } + + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a 
valid value in this slot + unsafe { + self._tab + .get::(UpdatedOptionalPubkey::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn update_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + UpdatedOptionalPubkey::VT_UPDATE_VERSION, + None, + ) + } + } + #[inline] + pub fn value(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + UpdatedOptionalPubkey::VT_VALUE, + None, + ) + } + } + } + + impl flatbuffers::Verifiable for UpdatedOptionalPubkey<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::>( + "update_version", + Self::VT_UPDATE_VERSION, + false, + )? + .visit_field::>>( + "value", + Self::VT_VALUE, + false, + )? 
+ .finish(); + Ok(()) + } + } + pub struct UpdatedOptionalPubkeyArgs<'a> { + pub slot_updated: u64, + pub update_version: Option>>, + pub value: Option>>, + } + impl<'a> Default for UpdatedOptionalPubkeyArgs<'a> { + #[inline] + fn default() -> Self { + UpdatedOptionalPubkeyArgs { + slot_updated: 0, + update_version: None, + value: None, + } + } + } + + pub struct UpdatedOptionalPubkeyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdatedOptionalPubkeyBuilder<'a, 'b, A> { + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(UpdatedOptionalPubkey::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_update_version( + &mut self, + update_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + UpdatedOptionalPubkey::VT_UPDATE_VERSION, + update_version, + ); + } + #[inline] + pub fn add_value(&mut self, value: flatbuffers::WIPOffset>) { + self.fbb_.push_slot_always::>( + UpdatedOptionalPubkey::VT_VALUE, + value, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdatedOptionalPubkeyBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + UpdatedOptionalPubkeyBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for UpdatedOptionalPubkey<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdatedOptionalPubkey"); + ds.field("slot_updated", &self.slot_updated()); + ds.field("update_version", &self.update_version()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum UpdatedCreatorsOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct 
UpdatedCreators<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for UpdatedCreators<'a> { + type Inner = UpdatedCreators<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdatedCreators<'a> { + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 4; + pub const VT_UPDATE_VERSION: flatbuffers::VOffsetT = 6; + pub const VT_VALUE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdatedCreators { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdatedCreatorsArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdatedCreatorsBuilder::new(_fbb); + builder.add_slot_updated(args.slot_updated); + if let Some(x) = args.value { + builder.add_value(x); + } + if let Some(x) = args.update_version { + builder.add_update_version(x); + } + builder.finish() + } + + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedCreators::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn update_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + UpdatedCreators::VT_UPDATE_VERSION, + None, + ) + } + } + #[inline] + pub fn value( + &self, + ) -> Option>>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>, + >>(UpdatedCreators::VT_VALUE, None) + } + } + } + + impl flatbuffers::Verifiable for UpdatedCreators<'_> { + #[inline] + fn run_verifier( + 
v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::>( + "update_version", + Self::VT_UPDATE_VERSION, + false, + )? + .visit_field::>, + >>("value", Self::VT_VALUE, false)? + .finish(); + Ok(()) + } + } + pub struct UpdatedCreatorsArgs<'a> { + pub slot_updated: u64, + pub update_version: Option>>, + pub value: Option< + flatbuffers::WIPOffset< + flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset>>, + >, + >, + } + impl<'a> Default for UpdatedCreatorsArgs<'a> { + #[inline] + fn default() -> Self { + UpdatedCreatorsArgs { + slot_updated: 0, + update_version: None, + value: None, + } + } + } + + pub struct UpdatedCreatorsBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdatedCreatorsBuilder<'a, 'b, A> { + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(UpdatedCreators::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_update_version( + &mut self, + update_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + UpdatedCreators::VT_UPDATE_VERSION, + update_version, + ); + } + #[inline] + pub fn add_value( + &mut self, + value: flatbuffers::WIPOffset< + flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset>>, + >, + ) { + self.fbb_ + .push_slot_always::>(UpdatedCreators::VT_VALUE, value); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdatedCreatorsBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + UpdatedCreatorsBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } 
+ } + + impl core::fmt::Debug for UpdatedCreators<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdatedCreators"); + ds.field("slot_updated", &self.slot_updated()); + ds.field("update_version", &self.update_version()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum UpdatedChainMutabilityOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct UpdatedChainMutability<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for UpdatedChainMutability<'a> { + type Inner = UpdatedChainMutability<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdatedChainMutability<'a> { + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 4; + pub const VT_UPDATE_VERSION: flatbuffers::VOffsetT = 6; + pub const VT_VALUE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdatedChainMutability { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdatedChainMutabilityArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdatedChainMutabilityBuilder::new(_fbb); + builder.add_slot_updated(args.slot_updated); + if let Some(x) = args.update_version { + builder.add_update_version(x); + } + builder.add_value(args.value); + builder.finish() + } + + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedChainMutability::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn update_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // 
which contains a valid value in this slot + unsafe { + self._tab + .get::>( + UpdatedChainMutability::VT_UPDATE_VERSION, + None, + ) + } + } + #[inline] + pub fn value(&self) -> ChainMutability { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::( + UpdatedChainMutability::VT_VALUE, + Some(ChainMutability::Immutable), + ) + .unwrap() + } + } + } + + impl flatbuffers::Verifiable for UpdatedChainMutability<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::>( + "update_version", + Self::VT_UPDATE_VERSION, + false, + )? + .visit_field::("value", Self::VT_VALUE, false)? + .finish(); + Ok(()) + } + } + pub struct UpdatedChainMutabilityArgs<'a> { + pub slot_updated: u64, + pub update_version: Option>>, + pub value: ChainMutability, + } + impl<'a> Default for UpdatedChainMutabilityArgs<'a> { + #[inline] + fn default() -> Self { + UpdatedChainMutabilityArgs { + slot_updated: 0, + update_version: None, + value: ChainMutability::Immutable, + } + } + } + + pub struct UpdatedChainMutabilityBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdatedChainMutabilityBuilder<'a, 'b, A> { + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(UpdatedChainMutability::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_update_version( + &mut self, + update_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + UpdatedChainMutability::VT_UPDATE_VERSION, + update_version, + ); + } + #[inline] + pub fn add_value(&mut self, value: ChainMutability) { + 
self.fbb_.push_slot::( + UpdatedChainMutability::VT_VALUE, + value, + ChainMutability::Immutable, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdatedChainMutabilityBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + UpdatedChainMutabilityBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for UpdatedChainMutability<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdatedChainMutability"); + ds.field("slot_updated", &self.slot_updated()); + ds.field("update_version", &self.update_version()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum UpdatedOwnerTypeOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct UpdatedOwnerType<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for UpdatedOwnerType<'a> { + type Inner = UpdatedOwnerType<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> UpdatedOwnerType<'a> { + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 4; + pub const VT_UPDATE_VERSION: flatbuffers::VOffsetT = 6; + pub const VT_VALUE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + UpdatedOwnerType { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UpdatedOwnerTypeArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = UpdatedOwnerTypeBuilder::new(_fbb); + builder.add_slot_updated(args.slot_updated); + if let Some(x) = args.update_version { + 
builder.add_update_version(x); + } + builder.add_value(args.value); + builder.finish() + } + + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedOwnerType::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn update_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + UpdatedOwnerType::VT_UPDATE_VERSION, + None, + ) + } + } + #[inline] + pub fn value(&self) -> OwnerType { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(UpdatedOwnerType::VT_VALUE, Some(OwnerType::Unknown)) + .unwrap() + } + } + } + + impl flatbuffers::Verifiable for UpdatedOwnerType<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::>( + "update_version", + Self::VT_UPDATE_VERSION, + false, + )? + .visit_field::("value", Self::VT_VALUE, false)? 
+ .finish(); + Ok(()) + } + } + pub struct UpdatedOwnerTypeArgs<'a> { + pub slot_updated: u64, + pub update_version: Option>>, + pub value: OwnerType, + } + impl<'a> Default for UpdatedOwnerTypeArgs<'a> { + #[inline] + fn default() -> Self { + UpdatedOwnerTypeArgs { + slot_updated: 0, + update_version: None, + value: OwnerType::Unknown, + } + } + } + + pub struct UpdatedOwnerTypeBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> UpdatedOwnerTypeBuilder<'a, 'b, A> { + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(UpdatedOwnerType::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_update_version( + &mut self, + update_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + UpdatedOwnerType::VT_UPDATE_VERSION, + update_version, + ); + } + #[inline] + pub fn add_value(&mut self, value: OwnerType) { + self.fbb_ + .push_slot::(UpdatedOwnerType::VT_VALUE, value, OwnerType::Unknown); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UpdatedOwnerTypeBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + UpdatedOwnerTypeBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for UpdatedOwnerType<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("UpdatedOwnerType"); + ds.field("slot_updated", &self.slot_updated()); + ds.field("update_version", &self.update_version()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum CreatorOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct Creator<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> 
flatbuffers::Follow<'a> for Creator<'a> { + type Inner = Creator<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> Creator<'a> { + pub const VT_CREATOR: flatbuffers::VOffsetT = 4; + pub const VT_CREATOR_VERIFIED: flatbuffers::VOffsetT = 6; + pub const VT_CREATOR_SHARE: flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + Creator { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args CreatorArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = CreatorBuilder::new(_fbb); + builder.add_creator_share(args.creator_share); + if let Some(x) = args.creator { + builder.add_creator(x); + } + builder.add_creator_verified(args.creator_verified); + builder.finish() + } + + #[inline] + pub fn creator(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + Creator::VT_CREATOR, + None, + ) + } + } + #[inline] + pub fn creator_verified(&self) -> bool { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(Creator::VT_CREATOR_VERIFIED, Some(false)) + .unwrap() + } + } + #[inline] + pub fn creator_share(&self) -> u32 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(Creator::VT_CREATOR_SHARE, Some(0)) + .unwrap() + } + } + } + + impl flatbuffers::Verifiable for Creator<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + 
v.visit_table(pos)? + .visit_field::>>( + "creator", + Self::VT_CREATOR, + false, + )? + .visit_field::("creator_verified", Self::VT_CREATOR_VERIFIED, false)? + .visit_field::("creator_share", Self::VT_CREATOR_SHARE, false)? + .finish(); + Ok(()) + } + } + pub struct CreatorArgs<'a> { + pub creator: Option>>, + pub creator_verified: bool, + pub creator_share: u32, + } + impl<'a> Default for CreatorArgs<'a> { + #[inline] + fn default() -> Self { + CreatorArgs { + creator: None, + creator_verified: false, + creator_share: 0, + } + } + } + + pub struct CreatorBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> CreatorBuilder<'a, 'b, A> { + #[inline] + pub fn add_creator( + &mut self, + creator: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>(Creator::VT_CREATOR, creator); + } + #[inline] + pub fn add_creator_verified(&mut self, creator_verified: bool) { + self.fbb_ + .push_slot::(Creator::VT_CREATOR_VERIFIED, creator_verified, false); + } + #[inline] + pub fn add_creator_share(&mut self, creator_share: u32) { + self.fbb_ + .push_slot::(Creator::VT_CREATOR_SHARE, creator_share, 0); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> CreatorBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + CreatorBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for Creator<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("Creator"); + ds.field("creator", &self.creator()); + ds.field("creator_verified", &self.creator_verified()); + ds.field("creator_share", &self.creator_share()); + ds.finish() + } + } + pub enum AssetStaticDetailsOffset {} + 
#[derive(Copy, Clone, PartialEq)] + + pub struct AssetStaticDetails<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for AssetStaticDetails<'a> { + type Inner = AssetStaticDetails<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> AssetStaticDetails<'a> { + pub const VT_PUBKEY: flatbuffers::VOffsetT = 4; + pub const VT_SPECIFICATION_ASSET_CLASS: flatbuffers::VOffsetT = 6; + pub const VT_ROYALTY_TARGET_TYPE: flatbuffers::VOffsetT = 8; + pub const VT_CREATED_AT: flatbuffers::VOffsetT = 10; + pub const VT_EDITION_ADDRESS: flatbuffers::VOffsetT = 12; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + AssetStaticDetails { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args AssetStaticDetailsArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = AssetStaticDetailsBuilder::new(_fbb); + builder.add_created_at(args.created_at); + if let Some(x) = args.edition_address { + builder.add_edition_address(x); + } + if let Some(x) = args.pubkey { + builder.add_pubkey(x); + } + builder.add_royalty_target_type(args.royalty_target_type); + builder.add_specification_asset_class(args.specification_asset_class); + builder.finish() + } + + #[inline] + pub fn pubkey(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + AssetStaticDetails::VT_PUBKEY, + None, + ) + } + } + #[inline] + pub fn specification_asset_class(&self) -> SpecificationAssetClass { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::( + 
AssetStaticDetails::VT_SPECIFICATION_ASSET_CLASS, + Some(SpecificationAssetClass::Unknown), + ) + .unwrap() + } + } + #[inline] + pub fn royalty_target_type(&self) -> RoyaltyTargetType { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::( + AssetStaticDetails::VT_ROYALTY_TARGET_TYPE, + Some(RoyaltyTargetType::Unknown), + ) + .unwrap() + } + } + #[inline] + pub fn created_at(&self) -> i64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(AssetStaticDetails::VT_CREATED_AT, Some(0)) + .unwrap() + } + } + #[inline] + pub fn edition_address(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + AssetStaticDetails::VT_EDITION_ADDRESS, + None, + ) + } + } + } + + impl flatbuffers::Verifiable for AssetStaticDetails<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>>( + "pubkey", + Self::VT_PUBKEY, + false, + )? + .visit_field::( + "specification_asset_class", + Self::VT_SPECIFICATION_ASSET_CLASS, + false, + )? + .visit_field::( + "royalty_target_type", + Self::VT_ROYALTY_TARGET_TYPE, + false, + )? + .visit_field::("created_at", Self::VT_CREATED_AT, false)? + .visit_field::>>( + "edition_address", + Self::VT_EDITION_ADDRESS, + false, + )? 
+ .finish(); + Ok(()) + } + } + pub struct AssetStaticDetailsArgs<'a> { + pub pubkey: Option>>, + pub specification_asset_class: SpecificationAssetClass, + pub royalty_target_type: RoyaltyTargetType, + pub created_at: i64, + pub edition_address: Option>>, + } + impl<'a> Default for AssetStaticDetailsArgs<'a> { + #[inline] + fn default() -> Self { + AssetStaticDetailsArgs { + pubkey: None, + specification_asset_class: SpecificationAssetClass::Unknown, + royalty_target_type: RoyaltyTargetType::Unknown, + created_at: 0, + edition_address: None, + } + } + } + + pub struct AssetStaticDetailsBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> AssetStaticDetailsBuilder<'a, 'b, A> { + #[inline] + pub fn add_pubkey(&mut self, pubkey: flatbuffers::WIPOffset>) { + self.fbb_.push_slot_always::>( + AssetStaticDetails::VT_PUBKEY, + pubkey, + ); + } + #[inline] + pub fn add_specification_asset_class( + &mut self, + specification_asset_class: SpecificationAssetClass, + ) { + self.fbb_.push_slot::( + AssetStaticDetails::VT_SPECIFICATION_ASSET_CLASS, + specification_asset_class, + SpecificationAssetClass::Unknown, + ); + } + #[inline] + pub fn add_royalty_target_type(&mut self, royalty_target_type: RoyaltyTargetType) { + self.fbb_.push_slot::( + AssetStaticDetails::VT_ROYALTY_TARGET_TYPE, + royalty_target_type, + RoyaltyTargetType::Unknown, + ); + } + #[inline] + pub fn add_created_at(&mut self, created_at: i64) { + self.fbb_ + .push_slot::(AssetStaticDetails::VT_CREATED_AT, created_at, 0); + } + #[inline] + pub fn add_edition_address( + &mut self, + edition_address: flatbuffers::WIPOffset>, + ) { + self.fbb_.push_slot_always::>( + AssetStaticDetails::VT_EDITION_ADDRESS, + edition_address, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> AssetStaticDetailsBuilder<'a, 'b, A> { + let start = 
_fbb.start_table(); + AssetStaticDetailsBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for AssetStaticDetails<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("AssetStaticDetails"); + ds.field("pubkey", &self.pubkey()); + ds.field( + "specification_asset_class", + &self.specification_asset_class(), + ); + ds.field("royalty_target_type", &self.royalty_target_type()); + ds.field("created_at", &self.created_at()); + ds.field("edition_address", &self.edition_address()); + ds.finish() + } + } + pub enum AssetDynamicDetailsOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct AssetDynamicDetails<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for AssetDynamicDetails<'a> { + type Inner = AssetDynamicDetails<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> AssetDynamicDetails<'a> { + pub const VT_PUBKEY: flatbuffers::VOffsetT = 4; + pub const VT_IS_COMPRESSIBLE: flatbuffers::VOffsetT = 6; + pub const VT_IS_COMPRESSED: flatbuffers::VOffsetT = 8; + pub const VT_IS_FROZEN: flatbuffers::VOffsetT = 10; + pub const VT_SUPPLY: flatbuffers::VOffsetT = 12; + pub const VT_SEQ: flatbuffers::VOffsetT = 14; + pub const VT_IS_BURNT: flatbuffers::VOffsetT = 16; + pub const VT_WAS_DECOMPRESSED: flatbuffers::VOffsetT = 18; + pub const VT_ONCHAIN_DATA: flatbuffers::VOffsetT = 20; + pub const VT_CREATORS: flatbuffers::VOffsetT = 22; + pub const VT_ROYALTY_AMOUNT: flatbuffers::VOffsetT = 24; + pub const VT_URL: flatbuffers::VOffsetT = 26; + pub const VT_CHAIN_MUTABILITY: flatbuffers::VOffsetT = 28; + pub const VT_LAMPORTS: flatbuffers::VOffsetT = 30; + pub const VT_EXECUTABLE: flatbuffers::VOffsetT = 32; + 
pub const VT_METADATA_OWNER: flatbuffers::VOffsetT = 34; + pub const VT_RAW_NAME: flatbuffers::VOffsetT = 36; + pub const VT_MPL_CORE_PLUGINS: flatbuffers::VOffsetT = 38; + pub const VT_MPL_CORE_UNKNOWN_PLUGINS: flatbuffers::VOffsetT = 40; + pub const VT_RENT_EPOCH: flatbuffers::VOffsetT = 42; + pub const VT_NUM_MINTED: flatbuffers::VOffsetT = 44; + pub const VT_CURRENT_SIZE: flatbuffers::VOffsetT = 46; + pub const VT_PLUGINS_JSON_VERSION: flatbuffers::VOffsetT = 48; + pub const VT_MPL_CORE_EXTERNAL_PLUGINS: flatbuffers::VOffsetT = 50; + pub const VT_MPL_CORE_UNKNOWN_EXTERNAL_PLUGINS: flatbuffers::VOffsetT = 52; + pub const VT_MINT_EXTENSIONS: flatbuffers::VOffsetT = 54; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + AssetDynamicDetails { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args AssetDynamicDetailsArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = AssetDynamicDetailsBuilder::new(_fbb); + if let Some(x) = args.mint_extensions { + builder.add_mint_extensions(x); + } + if let Some(x) = args.mpl_core_unknown_external_plugins { + builder.add_mpl_core_unknown_external_plugins(x); + } + if let Some(x) = args.mpl_core_external_plugins { + builder.add_mpl_core_external_plugins(x); + } + if let Some(x) = args.plugins_json_version { + builder.add_plugins_json_version(x); + } + if let Some(x) = args.current_size { + builder.add_current_size(x); + } + if let Some(x) = args.num_minted { + builder.add_num_minted(x); + } + if let Some(x) = args.rent_epoch { + builder.add_rent_epoch(x); + } + if let Some(x) = args.mpl_core_unknown_plugins { + builder.add_mpl_core_unknown_plugins(x); + } + if let Some(x) = args.mpl_core_plugins { + builder.add_mpl_core_plugins(x); + } + if let Some(x) = args.raw_name { + builder.add_raw_name(x); + } + if let 
Some(x) = args.metadata_owner { + builder.add_metadata_owner(x); + } + if let Some(x) = args.executable { + builder.add_executable(x); + } + if let Some(x) = args.lamports { + builder.add_lamports(x); + } + if let Some(x) = args.chain_mutability { + builder.add_chain_mutability(x); + } + if let Some(x) = args.url { + builder.add_url(x); + } + if let Some(x) = args.royalty_amount { + builder.add_royalty_amount(x); + } + if let Some(x) = args.creators { + builder.add_creators(x); + } + if let Some(x) = args.onchain_data { + builder.add_onchain_data(x); + } + if let Some(x) = args.was_decompressed { + builder.add_was_decompressed(x); + } + if let Some(x) = args.is_burnt { + builder.add_is_burnt(x); + } + if let Some(x) = args.seq { + builder.add_seq(x); + } + if let Some(x) = args.supply { + builder.add_supply(x); + } + if let Some(x) = args.is_frozen { + builder.add_is_frozen(x); + } + if let Some(x) = args.is_compressed { + builder.add_is_compressed(x); + } + if let Some(x) = args.is_compressible { + builder.add_is_compressible(x); + } + if let Some(x) = args.pubkey { + builder.add_pubkey(x); + } + builder.finish() + } + + #[inline] + pub fn pubkey(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + AssetDynamicDetails::VT_PUBKEY, + None, + ) + } + } + #[inline] + pub fn is_compressible(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_IS_COMPRESSIBLE, + None, + ) + } + } + #[inline] + pub fn is_compressed(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_IS_COMPRESSED, + None, + ) + } + } + #[inline] + pub fn is_frozen(&self) -> Option> { + // Safety: + // Created from valid Table for this 
object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_IS_FROZEN, + None, + ) + } + } + #[inline] + pub fn supply(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_SUPPLY, + None, + ) + } + } + #[inline] + pub fn seq(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_SEQ, + None, + ) + } + } + #[inline] + pub fn is_burnt(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_IS_BURNT, + None, + ) + } + } + #[inline] + pub fn was_decompressed(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_WAS_DECOMPRESSED, + None, + ) + } + } + #[inline] + pub fn onchain_data(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_ONCHAIN_DATA, + None, + ) + } + } + #[inline] + pub fn creators(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_CREATORS, + None, + ) + } + } + #[inline] + pub fn royalty_amount(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_ROYALTY_AMOUNT, + None, + ) + } + } + #[inline] + pub fn url(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a 
valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_URL, + None, + ) + } + } + #[inline] + pub fn chain_mutability(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_CHAIN_MUTABILITY, + None, + ) + } + } + #[inline] + pub fn lamports(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_LAMPORTS, + None, + ) + } + } + #[inline] + pub fn executable(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_EXECUTABLE, + None, + ) + } + } + #[inline] + pub fn metadata_owner(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_METADATA_OWNER, + None, + ) + } + } + #[inline] + pub fn raw_name(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_RAW_NAME, + None, + ) + } + } + #[inline] + pub fn mpl_core_plugins(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_MPL_CORE_PLUGINS, + None, + ) + } + } + #[inline] + pub fn mpl_core_unknown_plugins(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_MPL_CORE_UNKNOWN_PLUGINS, + None, + ) + } + } + #[inline] + pub fn rent_epoch(&self) -> Option> { + // Safety: + // Created from valid 
Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_RENT_EPOCH, + None, + ) + } + } + #[inline] + pub fn num_minted(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_NUM_MINTED, + None, + ) + } + } + #[inline] + pub fn current_size(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_CURRENT_SIZE, + None, + ) + } + } + #[inline] + pub fn plugins_json_version(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetDynamicDetails::VT_PLUGINS_JSON_VERSION, + None, + ) + } + } + #[inline] + pub fn mpl_core_external_plugins(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_MPL_CORE_EXTERNAL_PLUGINS, + None, + ) + } + } + #[inline] + pub fn mpl_core_unknown_external_plugins(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_MPL_CORE_UNKNOWN_EXTERNAL_PLUGINS, + None, + ) + } + } + #[inline] + pub fn mint_extensions(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetDynamicDetails::VT_MINT_EXTENSIONS, + None, + ) + } + } + } + + impl flatbuffers::Verifiable for AssetDynamicDetails<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + 
v.visit_table(pos)? + .visit_field::>>( + "pubkey", + Self::VT_PUBKEY, + false, + )? + .visit_field::>( + "is_compressible", + Self::VT_IS_COMPRESSIBLE, + false, + )? + .visit_field::>( + "is_compressed", + Self::VT_IS_COMPRESSED, + false, + )? + .visit_field::>( + "is_frozen", + Self::VT_IS_FROZEN, + false, + )? + .visit_field::>( + "supply", + Self::VT_SUPPLY, + false, + )? + .visit_field::>( + "seq", + Self::VT_SEQ, + false, + )? + .visit_field::>( + "is_burnt", + Self::VT_IS_BURNT, + false, + )? + .visit_field::>( + "was_decompressed", + Self::VT_WAS_DECOMPRESSED, + false, + )? + .visit_field::>( + "onchain_data", + Self::VT_ONCHAIN_DATA, + false, + )? + .visit_field::>( + "creators", + Self::VT_CREATORS, + false, + )? + .visit_field::>( + "royalty_amount", + Self::VT_ROYALTY_AMOUNT, + false, + )? + .visit_field::>( + "url", + Self::VT_URL, + false, + )? + .visit_field::>( + "chain_mutability", + Self::VT_CHAIN_MUTABILITY, + false, + )? + .visit_field::>( + "lamports", + Self::VT_LAMPORTS, + false, + )? + .visit_field::>( + "executable", + Self::VT_EXECUTABLE, + false, + )? + .visit_field::>( + "metadata_owner", + Self::VT_METADATA_OWNER, + false, + )? + .visit_field::>( + "raw_name", + Self::VT_RAW_NAME, + false, + )? + .visit_field::>( + "mpl_core_plugins", + Self::VT_MPL_CORE_PLUGINS, + false, + )? + .visit_field::>( + "mpl_core_unknown_plugins", + Self::VT_MPL_CORE_UNKNOWN_PLUGINS, + false, + )? + .visit_field::>( + "rent_epoch", + Self::VT_RENT_EPOCH, + false, + )? + .visit_field::>( + "num_minted", + Self::VT_NUM_MINTED, + false, + )? + .visit_field::>( + "current_size", + Self::VT_CURRENT_SIZE, + false, + )? + .visit_field::>( + "plugins_json_version", + Self::VT_PLUGINS_JSON_VERSION, + false, + )? + .visit_field::>( + "mpl_core_external_plugins", + Self::VT_MPL_CORE_EXTERNAL_PLUGINS, + false, + )? + .visit_field::>( + "mpl_core_unknown_external_plugins", + Self::VT_MPL_CORE_UNKNOWN_EXTERNAL_PLUGINS, + false, + )? 
+ .visit_field::>( + "mint_extensions", + Self::VT_MINT_EXTENSIONS, + false, + )? + .finish(); + Ok(()) + } + } + pub struct AssetDynamicDetailsArgs<'a> { + pub pubkey: Option>>, + pub is_compressible: Option>>, + pub is_compressed: Option>>, + pub is_frozen: Option>>, + pub supply: Option>>, + pub seq: Option>>, + pub is_burnt: Option>>, + pub was_decompressed: Option>>, + pub onchain_data: Option>>, + pub creators: Option>>, + pub royalty_amount: Option>>, + pub url: Option>>, + pub chain_mutability: Option>>, + pub lamports: Option>>, + pub executable: Option>>, + pub metadata_owner: Option>>, + pub raw_name: Option>>, + pub mpl_core_plugins: Option>>, + pub mpl_core_unknown_plugins: Option>>, + pub rent_epoch: Option>>, + pub num_minted: Option>>, + pub current_size: Option>>, + pub plugins_json_version: Option>>, + pub mpl_core_external_plugins: Option>>, + pub mpl_core_unknown_external_plugins: Option>>, + pub mint_extensions: Option>>, + } + impl<'a> Default for AssetDynamicDetailsArgs<'a> { + #[inline] + fn default() -> Self { + AssetDynamicDetailsArgs { + pubkey: None, + is_compressible: None, + is_compressed: None, + is_frozen: None, + supply: None, + seq: None, + is_burnt: None, + was_decompressed: None, + onchain_data: None, + creators: None, + royalty_amount: None, + url: None, + chain_mutability: None, + lamports: None, + executable: None, + metadata_owner: None, + raw_name: None, + mpl_core_plugins: None, + mpl_core_unknown_plugins: None, + rent_epoch: None, + num_minted: None, + current_size: None, + plugins_json_version: None, + mpl_core_external_plugins: None, + mpl_core_unknown_external_plugins: None, + mint_extensions: None, + } + } + } + + pub struct AssetDynamicDetailsBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> AssetDynamicDetailsBuilder<'a, 'b, A> { + #[inline] + pub fn add_pubkey(&mut self, 
pubkey: flatbuffers::WIPOffset>) { + self.fbb_.push_slot_always::>( + AssetDynamicDetails::VT_PUBKEY, + pubkey, + ); + } + #[inline] + pub fn add_is_compressible( + &mut self, + is_compressible: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_IS_COMPRESSIBLE, + is_compressible, + ); + } + #[inline] + pub fn add_is_compressed( + &mut self, + is_compressed: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_IS_COMPRESSED, + is_compressed, + ); + } + #[inline] + pub fn add_is_frozen(&mut self, is_frozen: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_IS_FROZEN, + is_frozen, + ); + } + #[inline] + pub fn add_supply(&mut self, supply: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_SUPPLY, + supply, + ); + } + #[inline] + pub fn add_seq(&mut self, seq: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_SEQ, + seq, + ); + } + #[inline] + pub fn add_is_burnt(&mut self, is_burnt: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_IS_BURNT, + is_burnt, + ); + } + #[inline] + pub fn add_was_decompressed( + &mut self, + was_decompressed: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_WAS_DECOMPRESSED, + was_decompressed, + ); + } + #[inline] + pub fn add_onchain_data( + &mut self, + onchain_data: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_ONCHAIN_DATA, + onchain_data, + ); + } + #[inline] + pub fn add_creators(&mut self, creators: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_CREATORS, + creators, + ); + } + #[inline] + pub fn add_royalty_amount( + &mut self, + royalty_amount: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_ROYALTY_AMOUNT, + 
royalty_amount, + ); + } + #[inline] + pub fn add_url(&mut self, url: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_URL, + url, + ); + } + #[inline] + pub fn add_chain_mutability( + &mut self, + chain_mutability: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_CHAIN_MUTABILITY, + chain_mutability, + ); + } + #[inline] + pub fn add_lamports(&mut self, lamports: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_LAMPORTS, + lamports, + ); + } + #[inline] + pub fn add_executable(&mut self, executable: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_EXECUTABLE, + executable, + ); + } + #[inline] + pub fn add_metadata_owner( + &mut self, + metadata_owner: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_METADATA_OWNER, + metadata_owner, + ); + } + #[inline] + pub fn add_raw_name(&mut self, raw_name: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_RAW_NAME, + raw_name, + ); + } + #[inline] + pub fn add_mpl_core_plugins( + &mut self, + mpl_core_plugins: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_MPL_CORE_PLUGINS, + mpl_core_plugins, + ); + } + #[inline] + pub fn add_mpl_core_unknown_plugins( + &mut self, + mpl_core_unknown_plugins: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_MPL_CORE_UNKNOWN_PLUGINS, + mpl_core_unknown_plugins, + ); + } + #[inline] + pub fn add_rent_epoch(&mut self, rent_epoch: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_RENT_EPOCH, + rent_epoch, + ); + } + #[inline] + pub fn add_num_minted(&mut self, num_minted: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_NUM_MINTED, + num_minted, + ); + } + #[inline] + pub fn 
add_current_size(&mut self, current_size: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_CURRENT_SIZE, + current_size, + ); + } + #[inline] + pub fn add_plugins_json_version( + &mut self, + plugins_json_version: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_PLUGINS_JSON_VERSION, + plugins_json_version, + ); + } + #[inline] + pub fn add_mpl_core_external_plugins( + &mut self, + mpl_core_external_plugins: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_MPL_CORE_EXTERNAL_PLUGINS, + mpl_core_external_plugins, + ); + } + #[inline] + pub fn add_mpl_core_unknown_external_plugins( + &mut self, + mpl_core_unknown_external_plugins: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_MPL_CORE_UNKNOWN_EXTERNAL_PLUGINS, + mpl_core_unknown_external_plugins, + ); + } + #[inline] + pub fn add_mint_extensions( + &mut self, + mint_extensions: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetDynamicDetails::VT_MINT_EXTENSIONS, + mint_extensions, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> AssetDynamicDetailsBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + AssetDynamicDetailsBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for AssetDynamicDetails<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("AssetDynamicDetails"); + ds.field("pubkey", &self.pubkey()); + ds.field("is_compressible", &self.is_compressible()); + ds.field("is_compressed", &self.is_compressed()); + ds.field("is_frozen", &self.is_frozen()); + ds.field("supply", &self.supply()); + ds.field("seq", &self.seq()); + 
ds.field("is_burnt", &self.is_burnt()); + ds.field("was_decompressed", &self.was_decompressed()); + ds.field("onchain_data", &self.onchain_data()); + ds.field("creators", &self.creators()); + ds.field("royalty_amount", &self.royalty_amount()); + ds.field("url", &self.url()); + ds.field("chain_mutability", &self.chain_mutability()); + ds.field("lamports", &self.lamports()); + ds.field("executable", &self.executable()); + ds.field("metadata_owner", &self.metadata_owner()); + ds.field("raw_name", &self.raw_name()); + ds.field("mpl_core_plugins", &self.mpl_core_plugins()); + ds.field("mpl_core_unknown_plugins", &self.mpl_core_unknown_plugins()); + ds.field("rent_epoch", &self.rent_epoch()); + ds.field("num_minted", &self.num_minted()); + ds.field("current_size", &self.current_size()); + ds.field("plugins_json_version", &self.plugins_json_version()); + ds.field( + "mpl_core_external_plugins", + &self.mpl_core_external_plugins(), + ); + ds.field( + "mpl_core_unknown_external_plugins", + &self.mpl_core_unknown_external_plugins(), + ); + ds.field("mint_extensions", &self.mint_extensions()); + ds.finish() + } + } + pub enum AssetAuthorityOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct AssetAuthority<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for AssetAuthority<'a> { + type Inner = AssetAuthority<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> AssetAuthority<'a> { + pub const VT_PUBKEY: flatbuffers::VOffsetT = 4; + pub const VT_AUTHORITY: flatbuffers::VOffsetT = 6; + pub const VT_SLOT_UPDATED: flatbuffers::VOffsetT = 8; + pub const VT_WRITE_VERSION: flatbuffers::VOffsetT = 10; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + AssetAuthority { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 
'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args AssetAuthorityArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = AssetAuthorityBuilder::new(_fbb); + builder.add_write_version(args.write_version); + builder.add_slot_updated(args.slot_updated); + if let Some(x) = args.authority { + builder.add_authority(x); + } + if let Some(x) = args.pubkey { + builder.add_pubkey(x); + } + builder.finish() + } + + #[inline] + pub fn pubkey(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + AssetAuthority::VT_PUBKEY, + None, + ) + } + } + #[inline] + pub fn authority(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + AssetAuthority::VT_AUTHORITY, + None, + ) + } + } + #[inline] + pub fn slot_updated(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(AssetAuthority::VT_SLOT_UPDATED, Some(0)) + .unwrap() + } + } + #[inline] + pub fn write_version(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(AssetAuthority::VT_WRITE_VERSION, Some(0)) + .unwrap() + } + } + } + + impl flatbuffers::Verifiable for AssetAuthority<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>>( + "pubkey", + Self::VT_PUBKEY, + false, + )? + .visit_field::>>( + "authority", + Self::VT_AUTHORITY, + false, + )? + .visit_field::("slot_updated", Self::VT_SLOT_UPDATED, false)? + .visit_field::("write_version", Self::VT_WRITE_VERSION, false)? 
+ .finish(); + Ok(()) + } + } + pub struct AssetAuthorityArgs<'a> { + pub pubkey: Option>>, + pub authority: Option>>, + pub slot_updated: u64, + pub write_version: u64, + } + impl<'a> Default for AssetAuthorityArgs<'a> { + #[inline] + fn default() -> Self { + AssetAuthorityArgs { + pubkey: None, + authority: None, + slot_updated: 0, + write_version: 0, + } + } + } + + pub struct AssetAuthorityBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> AssetAuthorityBuilder<'a, 'b, A> { + #[inline] + pub fn add_pubkey(&mut self, pubkey: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>(AssetAuthority::VT_PUBKEY, pubkey); + } + #[inline] + pub fn add_authority( + &mut self, + authority: flatbuffers::WIPOffset>, + ) { + self.fbb_.push_slot_always::>( + AssetAuthority::VT_AUTHORITY, + authority, + ); + } + #[inline] + pub fn add_slot_updated(&mut self, slot_updated: u64) { + self.fbb_ + .push_slot::(AssetAuthority::VT_SLOT_UPDATED, slot_updated, 0); + } + #[inline] + pub fn add_write_version(&mut self, write_version: u64) { + self.fbb_ + .push_slot::(AssetAuthority::VT_WRITE_VERSION, write_version, 0); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> AssetAuthorityBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + AssetAuthorityBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for AssetAuthority<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("AssetAuthority"); + ds.field("pubkey", &self.pubkey()); + ds.field("authority", &self.authority()); + ds.field("slot_updated", &self.slot_updated()); + ds.field("write_version", 
&self.write_version()); + ds.finish() + } + } + pub enum AssetOwnerOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct AssetOwner<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for AssetOwner<'a> { + type Inner = AssetOwner<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> AssetOwner<'a> { + pub const VT_PUBKEY: flatbuffers::VOffsetT = 4; + pub const VT_OWNER: flatbuffers::VOffsetT = 6; + pub const VT_DELEGATE: flatbuffers::VOffsetT = 8; + pub const VT_OWNER_TYPE: flatbuffers::VOffsetT = 10; + pub const VT_OWNER_DELEGATE_SEQ: flatbuffers::VOffsetT = 12; + pub const VT_IS_CURRENT_OWNER: flatbuffers::VOffsetT = 14; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + AssetOwner { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args AssetOwnerArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = AssetOwnerBuilder::new(_fbb); + if let Some(x) = args.is_current_owner { + builder.add_is_current_owner(x); + } + if let Some(x) = args.owner_delegate_seq { + builder.add_owner_delegate_seq(x); + } + if let Some(x) = args.owner_type { + builder.add_owner_type(x); + } + if let Some(x) = args.delegate { + builder.add_delegate(x); + } + if let Some(x) = args.owner { + builder.add_owner(x); + } + if let Some(x) = args.pubkey { + builder.add_pubkey(x); + } + builder.finish() + } + + #[inline] + pub fn pubkey(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + AssetOwner::VT_PUBKEY, + None, + ) + } + } + #[inline] + pub fn owner(&self) -> Option> { + // Safety: + // Created from valid Table for this 
object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetOwner::VT_OWNER, + None, + ) + } + } + #[inline] + pub fn delegate(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetOwner::VT_DELEGATE, + None, + ) + } + } + #[inline] + pub fn owner_type(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetOwner::VT_OWNER_TYPE, + None, + ) + } + } + #[inline] + pub fn owner_delegate_seq(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetOwner::VT_OWNER_DELEGATE_SEQ, + None, + ) + } + } + #[inline] + pub fn is_current_owner(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetOwner::VT_IS_CURRENT_OWNER, + None, + ) + } + } + } + + impl flatbuffers::Verifiable for AssetOwner<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>>( + "pubkey", + Self::VT_PUBKEY, + false, + )? + .visit_field::>( + "owner", + Self::VT_OWNER, + false, + )? + .visit_field::>( + "delegate", + Self::VT_DELEGATE, + false, + )? + .visit_field::>( + "owner_type", + Self::VT_OWNER_TYPE, + false, + )? + .visit_field::>( + "owner_delegate_seq", + Self::VT_OWNER_DELEGATE_SEQ, + false, + )? + .visit_field::>( + "is_current_owner", + Self::VT_IS_CURRENT_OWNER, + false, + )? 
+ .finish(); + Ok(()) + } + } + pub struct AssetOwnerArgs<'a> { + pub pubkey: Option>>, + pub owner: Option>>, + pub delegate: Option>>, + pub owner_type: Option>>, + pub owner_delegate_seq: Option>>, + pub is_current_owner: Option>>, + } + impl<'a> Default for AssetOwnerArgs<'a> { + #[inline] + fn default() -> Self { + AssetOwnerArgs { + pubkey: None, + owner: None, + delegate: None, + owner_type: None, + owner_delegate_seq: None, + is_current_owner: None, + } + } + } + + pub struct AssetOwnerBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> AssetOwnerBuilder<'a, 'b, A> { + #[inline] + pub fn add_pubkey(&mut self, pubkey: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>(AssetOwner::VT_PUBKEY, pubkey); + } + #[inline] + pub fn add_owner(&mut self, owner: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetOwner::VT_OWNER, + owner, + ); + } + #[inline] + pub fn add_delegate( + &mut self, + delegate: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetOwner::VT_DELEGATE, + delegate, + ); + } + #[inline] + pub fn add_owner_type(&mut self, owner_type: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetOwner::VT_OWNER_TYPE, + owner_type, + ); + } + #[inline] + pub fn add_owner_delegate_seq( + &mut self, + owner_delegate_seq: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetOwner::VT_OWNER_DELEGATE_SEQ, + owner_delegate_seq, + ); + } + #[inline] + pub fn add_is_current_owner( + &mut self, + is_current_owner: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetOwner::VT_IS_CURRENT_OWNER, + is_current_owner, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> AssetOwnerBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + AssetOwnerBuilder { + fbb_: _fbb, + 
start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for AssetOwner<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("AssetOwner"); + ds.field("pubkey", &self.pubkey()); + ds.field("owner", &self.owner()); + ds.field("delegate", &self.delegate()); + ds.field("owner_type", &self.owner_type()); + ds.field("owner_delegate_seq", &self.owner_delegate_seq()); + ds.field("is_current_owner", &self.is_current_owner()); + ds.finish() + } + } + pub enum AssetCollectionOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct AssetCollection<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for AssetCollection<'a> { + type Inner = AssetCollection<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> AssetCollection<'a> { + pub const VT_PUBKEY: flatbuffers::VOffsetT = 4; + pub const VT_COLLECTION: flatbuffers::VOffsetT = 6; + pub const VT_IS_COLLECTION_VERIFIED: flatbuffers::VOffsetT = 8; + pub const VT_AUTHORITY: flatbuffers::VOffsetT = 10; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + AssetCollection { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args AssetCollectionArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = AssetCollectionBuilder::new(_fbb); + if let Some(x) = args.authority { + builder.add_authority(x); + } + if let Some(x) = args.is_collection_verified { + builder.add_is_collection_verified(x); + } + if let Some(x) = args.collection { + builder.add_collection(x); + } + if let Some(x) = 
args.pubkey { + builder.add_pubkey(x); + } + builder.finish() + } + + #[inline] + pub fn pubkey(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + AssetCollection::VT_PUBKEY, + None, + ) + } + } + #[inline] + pub fn collection(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetCollection::VT_COLLECTION, + None, + ) + } + } + #[inline] + pub fn is_collection_verified(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetCollection::VT_IS_COLLECTION_VERIFIED, + None, + ) + } + } + #[inline] + pub fn authority(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetCollection::VT_AUTHORITY, + None, + ) + } + } + } + + impl flatbuffers::Verifiable for AssetCollection<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>>( + "pubkey", + Self::VT_PUBKEY, + false, + )? + .visit_field::>( + "collection", + Self::VT_COLLECTION, + false, + )? + .visit_field::>( + "is_collection_verified", + Self::VT_IS_COLLECTION_VERIFIED, + false, + )? + .visit_field::>( + "authority", + Self::VT_AUTHORITY, + false, + )? 
+ .finish(); + Ok(()) + } + } + pub struct AssetCollectionArgs<'a> { + pub pubkey: Option>>, + pub collection: Option>>, + pub is_collection_verified: Option>>, + pub authority: Option>>, + } + impl<'a> Default for AssetCollectionArgs<'a> { + #[inline] + fn default() -> Self { + AssetCollectionArgs { + pubkey: None, + collection: None, + is_collection_verified: None, + authority: None, + } + } + } + + pub struct AssetCollectionBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> AssetCollectionBuilder<'a, 'b, A> { + #[inline] + pub fn add_pubkey(&mut self, pubkey: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>(AssetCollection::VT_PUBKEY, pubkey); + } + #[inline] + pub fn add_collection(&mut self, collection: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetCollection::VT_COLLECTION, + collection, + ); + } + #[inline] + pub fn add_is_collection_verified( + &mut self, + is_collection_verified: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetCollection::VT_IS_COLLECTION_VERIFIED, + is_collection_verified, + ); + } + #[inline] + pub fn add_authority( + &mut self, + authority: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetCollection::VT_AUTHORITY, + authority, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> AssetCollectionBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + AssetCollectionBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for AssetCollection<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("AssetCollection"); + ds.field("pubkey", 
&self.pubkey()); + ds.field("collection", &self.collection()); + ds.field("is_collection_verified", &self.is_collection_verified()); + ds.field("authority", &self.authority()); + ds.finish() + } + } + pub enum AssetCompleteDetailsOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct AssetCompleteDetails<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for AssetCompleteDetails<'a> { + type Inner = AssetCompleteDetails<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> AssetCompleteDetails<'a> { + pub const VT_PUBKEY: flatbuffers::VOffsetT = 4; + pub const VT_STATIC_DETAILS: flatbuffers::VOffsetT = 6; + pub const VT_DYNAMIC_DETAILS: flatbuffers::VOffsetT = 8; + pub const VT_AUTHORITY: flatbuffers::VOffsetT = 10; + pub const VT_OWNER: flatbuffers::VOffsetT = 12; + pub const VT_COLLECTION: flatbuffers::VOffsetT = 14; + pub const VT_OTHER_KNOWN_OWNERS: flatbuffers::VOffsetT = 16; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + AssetCompleteDetails { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args AssetCompleteDetailsArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = AssetCompleteDetailsBuilder::new(_fbb); + if let Some(x) = args.other_known_owners { + builder.add_other_known_owners(x); + } + if let Some(x) = args.collection { + builder.add_collection(x); + } + if let Some(x) = args.owner { + builder.add_owner(x); + } + if let Some(x) = args.authority { + builder.add_authority(x); + } + if let Some(x) = args.dynamic_details { + builder.add_dynamic_details(x); + } + if let Some(x) = args.static_details { + builder.add_static_details(x); + } + if let Some(x) = args.pubkey { + 
builder.add_pubkey(x); + } + builder.finish() + } + + #[inline] + pub fn pubkey(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + AssetCompleteDetails::VT_PUBKEY, + None, + ) + } + } + #[inline] + pub fn static_details(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetCompleteDetails::VT_STATIC_DETAILS, + None, + ) + } + } + #[inline] + pub fn dynamic_details(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetCompleteDetails::VT_DYNAMIC_DETAILS, + None, + ) + } + } + #[inline] + pub fn authority(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetCompleteDetails::VT_AUTHORITY, + None, + ) + } + } + #[inline] + pub fn owner(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>( + AssetCompleteDetails::VT_OWNER, + None, + ) + } + } + #[inline] + pub fn collection(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>( + AssetCompleteDetails::VT_COLLECTION, + None, + ) + } + } + #[inline] + pub fn other_known_owners( + &self, + ) -> Option>>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>, + >>(AssetCompleteDetails::VT_OTHER_KNOWN_OWNERS, None) + } + } + } + + impl flatbuffers::Verifiable for AssetCompleteDetails<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), 
flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>>( + "pubkey", + Self::VT_PUBKEY, + false, + )? + .visit_field::>( + "static_details", + Self::VT_STATIC_DETAILS, + false, + )? + .visit_field::>( + "dynamic_details", + Self::VT_DYNAMIC_DETAILS, + false, + )? + .visit_field::>( + "authority", + Self::VT_AUTHORITY, + false, + )? + .visit_field::>( + "owner", + Self::VT_OWNER, + false, + )? + .visit_field::>( + "collection", + Self::VT_COLLECTION, + false, + )? + .visit_field::>, + >>("other_known_owners", Self::VT_OTHER_KNOWN_OWNERS, false)? + .finish(); + Ok(()) + } + } + pub struct AssetCompleteDetailsArgs<'a> { + pub pubkey: Option>>, + pub static_details: Option>>, + pub dynamic_details: Option>>, + pub authority: Option>>, + pub owner: Option>>, + pub collection: Option>>, + pub other_known_owners: Option< + flatbuffers::WIPOffset< + flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset>>, + >, + >, + } + impl<'a> Default for AssetCompleteDetailsArgs<'a> { + #[inline] + fn default() -> Self { + AssetCompleteDetailsArgs { + pubkey: None, + static_details: None, + dynamic_details: None, + authority: None, + owner: None, + collection: None, + other_known_owners: None, + } + } + } + + pub struct AssetCompleteDetailsBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> AssetCompleteDetailsBuilder<'a, 'b, A> { + #[inline] + pub fn add_pubkey(&mut self, pubkey: flatbuffers::WIPOffset>) { + self.fbb_.push_slot_always::>( + AssetCompleteDetails::VT_PUBKEY, + pubkey, + ); + } + #[inline] + pub fn add_static_details( + &mut self, + static_details: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetCompleteDetails::VT_STATIC_DETAILS, + static_details, + ); + } + #[inline] + pub fn add_dynamic_details( + &mut self, + dynamic_details: 
flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>( + AssetCompleteDetails::VT_DYNAMIC_DETAILS, + dynamic_details, + ); + } + #[inline] + pub fn add_authority(&mut self, authority: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetCompleteDetails::VT_AUTHORITY, + authority, + ); + } + #[inline] + pub fn add_owner(&mut self, owner: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetCompleteDetails::VT_OWNER, + owner, + ); + } + #[inline] + pub fn add_collection(&mut self, collection: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>( + AssetCompleteDetails::VT_COLLECTION, + collection, + ); + } + #[inline] + pub fn add_other_known_owners( + &mut self, + other_known_owners: flatbuffers::WIPOffset< + flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset>>, + >, + ) { + self.fbb_.push_slot_always::>( + AssetCompleteDetails::VT_OTHER_KNOWN_OWNERS, + other_known_owners, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> AssetCompleteDetailsBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + AssetCompleteDetailsBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for AssetCompleteDetails<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("AssetCompleteDetails"); + ds.field("pubkey", &self.pubkey()); + ds.field("static_details", &self.static_details()); + ds.field("dynamic_details", &self.dynamic_details()); + ds.field("authority", &self.authority()); + ds.field("owner", &self.owner()); + ds.field("collection", &self.collection()); + ds.field("other_known_owners", &self.other_known_owners()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `AssetCompleteDetails` + /// and returns it. 
+ /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_asset_complete_details_unchecked`. + pub fn root_as_asset_complete_details( + buf: &[u8], + ) -> Result { + flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `AssetCompleteDetails` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_asset_complete_details_unchecked`. + pub fn size_prefixed_root_as_asset_complete_details( + buf: &[u8], + ) -> Result { + flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `AssetCompleteDetails` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_asset_complete_details_unchecked`. + pub fn root_as_asset_complete_details_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `AssetCompleteDetails` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_asset_complete_details_unchecked`. 
+ pub fn size_prefixed_root_as_asset_complete_details_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a AssetCompleteDetails and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `AssetCompleteDetails`. + pub unsafe fn root_as_asset_complete_details_unchecked(buf: &[u8]) -> AssetCompleteDetails { + flatbuffers::root_unchecked::(buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed AssetCompleteDetails and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `AssetCompleteDetails`. + pub unsafe fn size_prefixed_root_as_asset_complete_details_unchecked( + buf: &[u8], + ) -> AssetCompleteDetails { + flatbuffers::size_prefixed_root_unchecked::(buf) + } + #[inline] + pub fn finish_asset_complete_details_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish(root, None); + } + + #[inline] + pub fn finish_size_prefixed_asset_complete_details_buffer< + 'a, + 'b, + A: flatbuffers::Allocator + 'a, + >( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, None); + } +} // pub mod Asset diff --git a/rocks-db/src/asset_streaming_client.rs b/rocks-db/src/asset_streaming_client.rs index 355c1ad24..4e33020a4 100644 --- a/rocks-db/src/asset_streaming_client.rs +++ b/rocks-db/src/asset_streaming_client.rs @@ -11,13 +11,14 @@ use rocksdb::DB; use solana_sdk::pubkey::Pubkey; use tokio_stream::wrappers::ReceiverStream; -use crate::asset::SlotAssetIdxKey; +use crate::asset::{AssetCompleteDetails, SlotAssetIdxKey}; +use crate::asset_generated::asset as 
fb; use crate::cl_items::{ClItem, ClItemKey, ClLeaf, ClLeafKey}; use crate::{ - asset::{AssetCollection, AssetLeaf, SlotAssetIdx}, + asset::{AssetLeaf, SlotAssetIdx}, column::TypedColumn, errors::StorageError, - AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Storage, + Storage, }; #[async_trait] @@ -89,9 +90,21 @@ async fn get_complete_asset_details( pubkey: Pubkey, metrics: Arc, ) -> crate::Result { - let static_data = - Storage::column::(backend.clone(), metrics.clone()).get(pubkey)?; - let static_data = match static_data { + let data = backend.get_pinned_cf( + &backend.cf_handle(AssetCompleteDetails::NAME).unwrap(), + AssetCompleteDetails::encode_key(pubkey), + )?; + let data = match data { + None => { + return Err(StorageError::Common("Asset data not found".to_string())); + } + Some(data) => data, + }; + let data = fb::root_as_asset_complete_details(&data) + .map_err(|e| StorageError::Common(e.to_string()))?; + // TODO: optimization point: this may be optimized by using the flatbuffers directly instead of converting to a struct, skipping for now + let data = AssetCompleteDetails::from(data); + let static_data = match data.static_details { None => { return Err(StorageError::Common( "Asset static data not found".to_string(), @@ -100,9 +113,7 @@ async fn get_complete_asset_details( Some(static_data) => static_data, }; - let dynamic_data = - Storage::column::(backend.clone(), metrics.clone()).get(pubkey)?; - let dynamic_data = match dynamic_data { + let dynamic_data = match data.dynamic_details { None => { return Err(StorageError::Common( "Asset dynamic data not found".to_string(), @@ -110,9 +121,7 @@ async fn get_complete_asset_details( } Some(dynamic_data) => dynamic_data, }; - let authority = - Storage::column::(backend.clone(), metrics.clone()).get(pubkey)?; - let authority = match authority { + let authority = match data.authority { None => { return Err(StorageError::Common( "Asset authority not found".to_string(), @@ -120,8 +129,7 @@ 
async fn get_complete_asset_details( } Some(authority) => authority, }; - let owner = Storage::column::(backend.clone(), metrics.clone()).get(pubkey)?; - let owner = match owner { + let owner = match data.owner { None => { return Err(StorageError::Common("Asset owner not found".to_string())); } @@ -129,8 +137,7 @@ async fn get_complete_asset_details( }; let asset_leaf = Storage::column::(backend.clone(), metrics.clone()).get(pubkey)?; - let collection = - Storage::column::(backend.clone(), metrics.clone()).get(pubkey)?; + let collection = data.collection; let onchain_data = match dynamic_data.onchain_data { None => None, @@ -225,10 +232,12 @@ async fn get_complete_asset_details( current_size: dynamic_data.current_size, plugins_json_version: dynamic_data.plugins_json_version, authority: Updated::new(authority.slot_updated, None, authority.authority), + owner_record_pubkey: owner.pubkey, owner: owner.owner, delegate: owner.delegate, owner_type: owner.owner_type, owner_delegate_seq: owner.owner_delegate_seq, + is_current_owner: owner.is_current_owner, collection: collection.map(|collection| entities::models::AssetCollection { collection: collection.collection, is_collection_verified: collection.is_collection_verified, diff --git a/rocks-db/src/batch_client.rs b/rocks-db/src/batch_client.rs index e0c8c64c8..a3fdae775 100644 --- a/rocks-db/src/batch_client.rs +++ b/rocks-db/src/batch_client.rs @@ -1,14 +1,10 @@ use std::collections::{HashMap, HashSet}; -use async_trait::async_trait; -use entities::enums::{SpecificationVersions, TokenMetadataEdition}; -use serde_json::json; -use solana_sdk::pubkey::Pubkey; - use crate::asset::{ - AssetCollection, AssetLeaf, AssetsUpdateIdx, FungibleAssetsUpdateIdx, SlotAssetIdx, - SlotAssetIdxKey, + AssetCollection, AssetCompleteDetails, AssetLeaf, AssetsUpdateIdx, MplCoreCollectionAuthority, + SlotAssetIdx, SlotAssetIdxKey, FungibleAssetsUpdateIdx, }; +use crate::asset_generated::asset as fb; use crate::cl_items::{ClItem, ClItemKey, 
ClLeaf, ClLeafKey}; use crate::column::TypedColumn; use crate::errors::StorageError; @@ -18,12 +14,13 @@ use crate::storage_traits::{ }; use crate::{ AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Result, Storage, - BATCH_ITERATION_ACTION, ITERATOR_TOP_ACTION, ROCKS_COMPONENT, + BATCH_GET_ACTION, BATCH_ITERATION_ACTION, ITERATOR_TOP_ACTION, ROCKS_COMPONENT, }; -use entities::models::{ - AssetIndex, CompleteAssetDetails, FungibleAssetIndex, UpdateVersion, Updated, UrlWithStatus, -}; - +use async_trait::async_trait; +use entities::enums::{SpecificationAssetClass, SpecificationVersions, TokenMetadataEdition}; +use entities::models::{AssetIndex, CompleteAssetDetails, UpdateVersion, Updated, FungibleAssetIndex, UrlWithStatus}; +use serde_json::json; +use solana_sdk::pubkey::Pubkey; impl AssetUpdateIndexStorage for Storage { fn last_known_fungible_asset_updated_key(&self) -> Result> { _ = self.db.try_catch_up_with_primary(); @@ -185,13 +182,117 @@ impl AssetUpdateIndexStorage for Storage { } } +impl Storage { + pub async fn get_asset_indexes_with_collections_and_urls( + &self, + asset_ids: Vec, + ) -> Result<(Vec, HashSet, HashMap)> { + let db = self.db.clone(); + tokio::task::spawn_blocking(move || { + let d = db.batched_multi_get_cf( + &db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + asset_ids, + false, //sorting the input and using true here slows down the method by 5% for batches or 15% for an indiviual asset + ); + let mut asset_indexes = Vec::new(); + let mut assets_collection_pks = HashSet::new(); + let mut urls = HashMap::new(); + + for asset in d { + let asset = asset?; + if let Some(asset) = asset { + let asset = fb::root_as_asset_complete_details(asset.as_ref()) + .map_err(|e| StorageError::Common(e.to_string()))?; + if asset.static_details().is_none() { + continue; + } + let key = + Pubkey::new_from_array(asset.pubkey().unwrap().bytes().try_into().unwrap()); + asset + .collection() + .and_then(|c| c.collection()) + .and_then(|c| 
c.value()) + .map(|c| { + assets_collection_pks.insert(Pubkey::try_from(c.bytes()).unwrap()) + }); + asset + .dynamic_details() + .and_then(|d| d.url()) + .and_then(|u| u.value()) + .filter(|s| !s.is_empty()) + .map(|u| urls.insert(key, u.to_string())); + asset_indexes.push(asset.into()); + } + } + Ok::<(_, _, _), StorageError>((asset_indexes, assets_collection_pks, urls)) + }) + .await + .map_err(|e| StorageError::Common(e.to_string()))? + } + + pub async fn get_assets_with_collections_and_urls( + &self, + asset_ids: Vec, + ) -> Result<( + HashMap, + HashSet, + HashMap, + )> { + let db = self.db.clone(); + let red_metrics = self.red_metrics.clone(); + tokio::task::spawn_blocking(move || { + let start_time = chrono::Utc::now(); + let d = db.batched_multi_get_cf( + &db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + asset_ids, + false, //sorting the input and using true here slows down the method by 5% for batches or 15% for an indiviual asset + ); + let mut assets_data = HashMap::new(); + let mut assets_collection_pks = HashSet::new(); + let mut urls = HashMap::new(); + + for asset in d { + let asset = asset?; + if let Some(asset) = asset { + let asset = fb::root_as_asset_complete_details(asset.as_ref()) + .map_err(|e| StorageError::Common(e.to_string()))?; + let key = + Pubkey::new_from_array(asset.pubkey().unwrap().bytes().try_into().unwrap()); + asset + .collection() + .and_then(|c| c.collection()) + .and_then(|c| c.value()) + .map(|c| { + assets_collection_pks.insert(Pubkey::try_from(c.bytes()).unwrap()) + }); + asset + .dynamic_details() + .and_then(|d| d.url()) + .and_then(|u| u.value()) + .map(|u| urls.insert(key, u.to_string())); + assets_data.insert(key, asset.into()); + } + } + red_metrics.observe_request( + ROCKS_COMPONENT, + BATCH_GET_ACTION, + AssetCompleteDetails::NAME, + start_time, + ); + Ok::<(_, _, _), StorageError>((assets_data, assets_collection_pks, urls)) + }) + .await + .map_err(|e| StorageError::Common(e.to_string()))? 
+ } +} + #[async_trait] impl AssetIndexReader for Storage { async fn get_fungible_assets_indexes( &self, keys: &[Pubkey], - ) -> Result> { - let mut fungible_assets_indexes: HashMap = HashMap::new(); + ) -> Result> { + let mut fungible_assets_indexes: Vec = Vec::new(); let start_time = chrono::Utc::now(); let token_accounts_details = self.token_accounts.batch_get(keys.to_vec()).await?; @@ -205,7 +306,7 @@ impl AssetIndexReader for Storage { fungible_asset_balance: Some(token_acc.amount as u64), }; - fungible_assets_indexes.insert(token_acc.pubkey, fungible_asset_index); + fungible_assets_indexes.push(fungible_asset_index); } self.red_metrics.observe_request( @@ -221,173 +322,82 @@ impl AssetIndexReader for Storage { async fn get_nft_asset_indexes<'a>( &self, keys: &[Pubkey], - collection_authorities: Option<&'a HashMap>, - ) -> Result> { - let mut asset_indexes = HashMap::new(); + ) -> Result> { let start_time = chrono::Utc::now(); - let assets_static_fut = self.asset_static_data.batch_get(keys.to_vec()); - let assets_dynamic_fut = self.asset_dynamic_data.batch_get(keys.to_vec()); - let assets_authority_fut = self.asset_authority_data.batch_get(keys.to_vec()); - let assets_owner_fut = self.asset_owner_data.batch_get(keys.to_vec()); - let assets_collection_fut = self.asset_collection_data.batch_get(keys.to_vec()); - - let ( - asset_static_details, - asset_dynamic_details, - asset_authority_details, - asset_owner_details, - asset_collection_details, - ) = tokio::join!( - assets_static_fut, - assets_dynamic_fut, - assets_authority_fut, - assets_owner_fut, - assets_collection_fut, + + let asset_index_collection_url_fut = + self.get_asset_indexes_with_collections_and_urls(keys.to_vec()); + let spl_mints_fut = self.spl_mints.batch_get(keys.to_vec()); + + let (mut asset_indexes, assets_collection_pks, urls) = + asset_index_collection_url_fut.await?; + + let spl_mints = spl_mints_fut.await?; + let is_nft_map = spl_mints + .into_iter() + .flatten() + .map(|spl_mint| 
(spl_mint.pubkey, spl_mint.is_nft())) + .collect::>(); + let offchain_data_downloaded_map_fut = self + .asset_offchain_data + .batch_get(urls.values().map(|u| u.to_string()).collect()); + + let mut mpl_core_collections = HashMap::new(); + let core_collections_iterator = self.db.batched_multi_get_cf( + &self.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + assets_collection_pks, + false, ); - - let asset_static_details = asset_static_details?; - let asset_dynamic_details = asset_dynamic_details?; - let asset_authority_details = asset_authority_details?; - let asset_owner_details = asset_owner_details?; - let asset_collection_details = asset_collection_details?; - - let mpl_core_map = { - // during dump creation hashmap with collection authorities will be passed - // and during regular synchronization we should make additional select from DB - if collection_authorities.is_some() { - HashMap::new() - } else { - let assets_collection_pks = asset_collection_details - .iter() - .flat_map(|c| c.as_ref().map(|c| c.collection.value)) - .collect::>(); - - self.asset_collection_data - .batch_get(assets_collection_pks) - .await? 
- .into_iter() - .flatten() - .filter_map(|asset| asset.authority.value.map(|v| (asset.pubkey, v))) - .collect::>() - } - }; - - // mpl_core_map.is_empty() && collection_authorities.is_some() check covers case when DB is empty - let mpl_core_collections = if mpl_core_map.is_empty() && collection_authorities.is_some() { - collection_authorities.unwrap() - } else { - &mpl_core_map - }; - - for static_info in asset_static_details.iter().flatten() { - let asset_index = AssetIndex { - pubkey: static_info.pubkey, - specification_version: SpecificationVersions::V1, - specification_asset_class: static_info.specification_asset_class, - royalty_target_type: static_info.royalty_target_type, - slot_created: static_info.created_at, - ..Default::default() - }; - - asset_indexes.insert(asset_index.pubkey, asset_index); - } - - for dynamic_info in asset_dynamic_details.iter().flatten() { - if let Some(existed_index) = asset_indexes.get_mut(&dynamic_info.pubkey) { - existed_index.pubkey = dynamic_info.pubkey; - existed_index.is_compressible = dynamic_info.is_compressible.value; - existed_index.is_compressed = dynamic_info.is_compressed.value; - existed_index.is_frozen = dynamic_info.is_frozen.value; - existed_index.supply = dynamic_info.supply.clone().map(|s| s.value as i64); - existed_index.is_burnt = dynamic_info.is_burnt.value; - existed_index.creators = dynamic_info.creators.clone().value; - existed_index.royalty_amount = dynamic_info.royalty_amount.value as i64; - existed_index.slot_updated = dynamic_info.get_slot_updated() as i64; - existed_index.metadata_url = self.url_with_status_for(dynamic_info); - } else { - let asset_index = AssetIndex { - pubkey: dynamic_info.pubkey, - is_compressible: dynamic_info.is_compressible.value, - is_compressed: dynamic_info.is_compressed.value, - is_frozen: dynamic_info.is_frozen.value, - supply: dynamic_info.supply.clone().map(|s| s.value as i64), - is_burnt: dynamic_info.is_burnt.value, - creators: dynamic_info.creators.clone().value, - 
royalty_amount: dynamic_info.royalty_amount.value as i64, - slot_updated: dynamic_info.get_slot_updated() as i64, - metadata_url: self.url_with_status_for(dynamic_info), - ..Default::default() + for asset in core_collections_iterator { + let asset = asset?; + if let Some(asset) = asset { + let asset = fb::root_as_asset_complete_details(asset.as_ref()) + .map_err(|e| StorageError::Common(e.to_string()))?; + if let Some(auth) = asset + .collection() + .and_then(|collection| collection.authority()) + .and_then(|auth| auth.value()) + { + let key = + Pubkey::new_from_array(asset.pubkey().unwrap().bytes().try_into().unwrap()); + let auth_value = Pubkey::new_from_array(auth.bytes().try_into().unwrap()); + mpl_core_collections.insert(key, auth_value); }; - - asset_indexes.insert(asset_index.pubkey, asset_index); } } - - for data in asset_authority_details.iter().flatten() { - if let Some(existed_index) = asset_indexes.get_mut(&data.pubkey) { - existed_index.pubkey = data.pubkey; - existed_index.authority = Some(data.authority); - if data.slot_updated as i64 > existed_index.slot_updated { - existed_index.slot_updated = data.slot_updated as i64; - } - } else { - let asset_index = AssetIndex { - pubkey: data.pubkey, - authority: Some(data.authority), - slot_updated: data.slot_updated as i64, - ..Default::default() - }; - - asset_indexes.insert(asset_index.pubkey, asset_index); + let offchain_data_downloaded_map: HashMap = offchain_data_downloaded_map_fut + .await? 
+ .into_iter() + .flatten() + .map(|offchain_data| { + ( + offchain_data.url.clone(), + !offchain_data.metadata.is_empty(), + ) + }) + .collect::>(); + + asset_indexes.iter_mut().for_each(|ref mut asset_index| { + if let Some(coll) = asset_index.collection { + asset_index.update_authority = mpl_core_collections.get(&coll).copied(); } - } - - for data in asset_owner_details.iter().flatten() { - if let Some(existed_index) = asset_indexes.get_mut(&data.pubkey) { - existed_index.pubkey = data.pubkey; - existed_index.owner = data.owner.value; - existed_index.delegate = data.delegate.value; - existed_index.owner_type = Some(data.owner_type.value); - if data.get_slot_updated() as i64 > existed_index.slot_updated { - existed_index.slot_updated = data.get_slot_updated() as i64; - } - } else { - let asset_index = AssetIndex { - pubkey: data.pubkey, - owner: data.owner.value, - delegate: data.delegate.value, - owner_type: Some(data.owner_type.value), - slot_updated: data.get_slot_updated() as i64, - ..Default::default() - }; - - asset_indexes.insert(asset_index.pubkey, asset_index); + if let Some(ref mut mut_val) = asset_index.metadata_url { + mut_val.is_downloaded = offchain_data_downloaded_map + .get(&mut_val.metadata_url) + .copied() + .unwrap_or_default(); } - } - - for data in asset_collection_details.iter().flatten() { - if let Some(existed_index) = asset_indexes.get_mut(&data.pubkey) { - existed_index.pubkey = data.pubkey; - existed_index.collection = Some(data.collection.value); - existed_index.is_collection_verified = Some(data.is_collection_verified.value); - existed_index.update_authority = - mpl_core_collections.get(&data.collection.value).copied(); - if data.get_slot_updated() as i64 > existed_index.slot_updated { - existed_index.slot_updated = data.get_slot_updated() as i64; - } - } else { - let asset_index = AssetIndex { - pubkey: data.pubkey, - collection: Some(data.collection.value), - is_collection_verified: Some(data.is_collection_verified.value), - 
update_authority: mpl_core_collections.get(&data.collection.value).copied(), - slot_updated: data.get_slot_updated() as i64, - ..Default::default() - }; - - asset_indexes.insert(asset_index.pubkey, asset_index); + // We can not trust this field and have to double check it + if (asset_index.specification_asset_class == SpecificationAssetClass::FungibleToken + || asset_index.specification_asset_class == SpecificationAssetClass::FungibleAsset) + && is_nft_map + .get(&asset_index.pubkey) + .map(|v| *v) + .unwrap_or_default() + { + asset_index.specification_asset_class = SpecificationAssetClass::Nft; } - } + }); self.red_metrics.observe_request( ROCKS_COMPONENT, @@ -414,23 +424,24 @@ impl AssetSlotStorage for Storage { impl Storage { pub async fn insert_gaped_data(&self, data: CompleteAssetDetails) -> Result<()> { - let mut batch = rocksdb::WriteBatch::default(); - self.asset_static_data.merge_with_batch( - &mut batch, - data.pubkey, - &AssetStaticDetails { + let write_version = if let Some(write_v) = data.authority.update_version { + match write_v { + UpdateVersion::WriteVersion(v) => Some(v), + _ => None, + } + } else { + None + }; + let acd = AssetCompleteDetails { + pubkey: data.pubkey, + static_details: Some(AssetStaticDetails { pubkey: data.pubkey, specification_asset_class: data.specification_asset_class, royalty_target_type: data.royalty_target_type, created_at: data.slot_created as i64, edition_address: data.edition_address, - }, - )?; - - self.asset_dynamic_data.merge_with_batch( - &mut batch, - data.pubkey, - &AssetDynamicDetails { + }), + dynamic_details: Some(AssetDynamicDetails { pubkey: data.pubkey, is_compressible: data.is_compressible, is_compressed: data.is_compressed, @@ -463,41 +474,30 @@ impl Storage { mpl_core_external_plugins: data.mpl_core_external_plugins, mpl_core_unknown_external_plugins: data.mpl_core_unknown_external_plugins, mint_extensions: data.mint_extensions, - }, - )?; - - let write_version = if let Some(write_v) = 
data.authority.update_version { - match write_v { - UpdateVersion::WriteVersion(v) => Some(v), - _ => None, - } - } else { - None - }; - - self.asset_authority_data.merge_with_batch( - &mut batch, - data.pubkey, - &AssetAuthority { + }), + authority: Some(AssetAuthority { pubkey: data.pubkey, authority: data.authority.value, slot_updated: data.authority.slot_updated, write_version, - }, - )?; - - if let Some(collection) = data.collection { - self.asset_collection_data.merge_with_batch( - &mut batch, - data.pubkey, - &AssetCollection { - pubkey: data.pubkey, - collection: collection.collection, - is_collection_verified: collection.is_collection_verified, - authority: collection.authority, - }, - )?; - } + }), + owner: Some(AssetOwner { + pubkey: data.owner_record_pubkey, + owner: data.owner, + delegate: data.delegate, + owner_type: data.owner_type, + owner_delegate_seq: data.owner_delegate_seq, + is_current_owner: data.is_current_owner, + }), + collection: data.collection.map(|collection| AssetCollection { + pubkey: data.pubkey, + collection: collection.collection, + is_collection_verified: collection.is_collection_verified, + authority: collection.authority, + }), + }; + let mut batch = rocksdb::WriteBatch::default(); + self.merge_compete_details_with_batch(&mut batch, &acd)?; if let Some(leaf) = data.asset_leaf { self.asset_leaf_data.merge_with_batch( @@ -516,18 +516,6 @@ impl Storage { )? 
} - self.asset_owner_data.merge_with_batch( - &mut batch, - data.pubkey, - &AssetOwner { - pubkey: data.pubkey, - owner: data.owner, - delegate: data.delegate, - owner_type: data.owner_type, - owner_delegate_seq: data.owner_delegate_seq, - }, - )?; - if let Some(leaf) = data.cl_leaf { self.cl_leafs.put_with_batch( &mut batch, @@ -583,6 +571,38 @@ impl Storage { Ok(()) } + pub(crate) fn merge_compete_details_with_batch( + &self, + batch: &mut rocksdb::WriteBatchWithTransaction, + data: &AssetCompleteDetails, + ) -> Result<()> { + let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(2500); + let acd = data.convert_to_fb(&mut builder); + builder.finish_minimal(acd); + batch.merge_cf( + &self.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + data.pubkey, + builder.finished_data(), + ); + // Store the MPL Core Collection authority for easy access + if data + .static_details + .as_ref() + .filter(|sd| sd.specification_asset_class == SpecificationAssetClass::MplCoreCollection) + .is_some() + && data.collection.is_some() + { + self.mpl_core_collection_authorities.merge_with_batch( + batch, + data.pubkey, + &MplCoreCollectionAuthority { + authority: data.collection.as_ref().unwrap().authority.clone(), + }, + )?; //this will never error in fact + } + Ok(()) + } + pub(crate) async fn write_batch(&self, batch: rocksdb::WriteBatch) -> Result<()> { let backend = self.db.clone(); tokio::task::spawn_blocking(move || backend.write(batch)) @@ -591,22 +611,4 @@ impl Storage { .map_err(|e| StorageError::Common(e.to_string()))?; Ok(()) } - - fn url_with_status_for(&self, dynamic_info: &AssetDynamicDetails) -> Option { - if dynamic_info.url.value.trim().is_empty() { - None - } else { - // doing this check because there may be saved empty strings for some urls - // because of bug in previous code - let is_downloaded = self - .asset_offchain_data - .get(dynamic_info.url.value.clone()) - .ok() - .flatten() - .map(|a| !a.metadata.is_empty()) - .unwrap_or(false); - - 
Some(UrlWithStatus::new(&dynamic_info.url.value, is_downloaded)) - } - } } diff --git a/rocks-db/src/batch_savers.rs b/rocks-db/src/batch_savers.rs index ec3d7d39e..5ef144352 100644 --- a/rocks-db/src/batch_savers.rs +++ b/rocks-db/src/batch_savers.rs @@ -1,4 +1,6 @@ -use crate::asset::{AssetCollection, MetadataMintMap}; +use crate::asset::{AssetCollection, AssetCompleteDetails, MetadataMintMap}; +use crate::asset_generated::asset as fb; +use crate::column::TypedColumn; use crate::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; use crate::Result; use crate::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Storage}; @@ -32,6 +34,28 @@ pub struct MetadataModels { pub metadata_mint: Option, } +impl From<&MetadataModels> for AssetCompleteDetails { + fn from(value: &MetadataModels) -> Self { + Self { + pubkey: value + .asset_static + .as_ref() + .map(|s| s.pubkey) + .or_else(|| value.asset_dynamic.as_ref().map(|d| d.pubkey)) + .or_else(|| value.asset_authority.as_ref().map(|a| a.pubkey)) + // this might be wrong for token accounts, where the owner is the token account pubkey, rather than the NFT mint pubkey, but in 2 cases where it's used - it's ok, as static data is passed along with the owner, so that one is used. + .or_else(|| value.asset_owner.as_ref().map(|o| o.pubkey)) + .or_else(|| value.asset_collection.as_ref().map(|c| c.pubkey)) + .unwrap_or_default(), + static_details: value.asset_static.clone(), + dynamic_details: value.asset_dynamic.clone(), + authority: value.asset_authority.clone(), + owner: value.asset_owner.clone(), + collection: value.asset_collection.clone(), + } + } +} + #[macro_export] macro_rules! 
store_assets { ($self:expr, $asset:expr, $db_field:ident, $metric_name:expr) => {{ @@ -80,46 +104,27 @@ impl BatchSaveStorage { self.batch.len() >= self.batch_size } - fn store_static(&mut self, asset_static: &AssetStaticDetails) -> Result<()> { - store_assets!( - self, - asset_static, - asset_static_data, - "accounts_static_merge_with_batch" - ) - } - pub fn store_owner(&mut self, asset_owner: &AssetOwner) -> Result<()> { - store_assets!( - self, - asset_owner, - asset_owner_data, - "accounts_owner_merge_with_batch" - ) + pub fn store_complete(&mut self, data: &AssetCompleteDetails) -> Result<()> { + self.storage + .merge_compete_details_with_batch(&mut self.batch, data)?; + let res = Ok(()); + result_to_metrics( + self.metrics.clone(), + &res, + "accounts_complete_data_merge_with_batch", + ); + res } + pub fn store_dynamic(&mut self, asset_dynamic: &AssetDynamicDetails) -> Result<()> { - store_assets!( - self, - asset_dynamic, - asset_dynamic_data, - "accounts_dynamic_merge_with_batch" - ) - } - fn store_authority(&mut self, asset_authority: &AssetAuthority) -> Result<()> { - store_assets!( - self, - asset_authority, - asset_authority_data, - "accounts_authority_merge_with_batch" - ) - } - fn store_collection(&mut self, asset_collection: &AssetCollection) -> Result<()> { - store_assets!( - self, - asset_collection, - asset_collection_data, - "accounts_collection_merge_with_batch" - ) + let asset = &AssetCompleteDetails { + pubkey: asset_dynamic.pubkey, + dynamic_details: Some(asset_dynamic.clone()), + ..Default::default() + }; + self.store_complete(asset) } + fn store_metadata_mint(&mut self, metadata_mint_map: &MetadataMintMap) -> Result<()> { store_assets!( self, @@ -212,59 +217,44 @@ impl BatchSaveStorage { ) } - pub fn get_authority(&self, address: Pubkey) -> Pubkey { - self.storage - .asset_authority_data - .get(address) - .unwrap_or(None) - .map(|authority| authority.authority) - .unwrap_or_default() + pub fn get_authority(&self, address: Pubkey) -> Option 
{ + if let Ok(Some(data)) = self.storage.db.get_pinned_cf( + &self + .storage + .db + .cf_handle(AssetCompleteDetails::NAME) + .unwrap(), + address, + ) { + let asset = fb::root_as_asset_complete_details(&data); + return asset + .ok() + .and_then(|a| a.authority()) + .and_then(|auth| auth.authority()) + .map(|k| Pubkey::try_from(k.bytes()).unwrap()); + } + None } + pub fn get_mint_map(&self, key: Pubkey) -> Result> { self.storage.metadata_mint_map.get(key) } - pub fn store_metadata_models(&mut self, metadata_models: &MetadataModels) -> Result<()> { - let mut slot_updated = 0; - let mut key = None; - if let Some(asset_static) = &metadata_models.asset_static { - self.store_static(asset_static)?; - key = Some(asset_static.pubkey); - } - if let Some(asset_dynamic) = &metadata_models.asset_dynamic { - self.store_dynamic(asset_dynamic)?; - key = Some(asset_dynamic.pubkey); - if asset_dynamic.get_slot_updated() > slot_updated { - slot_updated = asset_dynamic.get_slot_updated() - } - } - if let Some(asset_authority) = &metadata_models.asset_authority { - self.store_authority(asset_authority)?; - key = Some(asset_authority.pubkey); - } - if let Some(asset_collection) = &metadata_models.asset_collection { - self.store_collection(asset_collection)?; - key = Some(asset_collection.pubkey); - if asset_collection.get_slot_updated() > slot_updated { - slot_updated = asset_collection.get_slot_updated() - } - } - if let Some(metadata_mint) = &metadata_models.metadata_mint { + pub fn store_metadata_models( + &mut self, + asset: &AssetCompleteDetails, + metadata_mint: Option, + ) -> Result<()> { + if let Some(metadata_mint) = &metadata_mint { self.store_metadata_mint(metadata_mint)?; } - if let Some(asset_owner) = &metadata_models.asset_owner { - self.store_owner(asset_owner)?; - key = Some(asset_owner.pubkey); - if asset_owner.get_slot_updated() > slot_updated { - slot_updated = asset_owner.get_slot_updated() - } - } - - if let Some(key) = key { + if asset.any_field_is_set() { + 
self.store_complete(asset)?; + let slot_updated = asset.get_slot_updated(); if slot_updated == 0 { return Ok(()); } - if let Err(e) = self.asset_updated_with_batch(slot_updated, key) { + if let Err(e) = self.asset_updated_with_batch(slot_updated, asset.pubkey) { error!("Error while updating assets update idx: {}", e); } } diff --git a/rocks-db/src/bin/column_remover/main.rs b/rocks-db/src/bin/column_remover/main.rs index c8fede4c2..a3ccfa8c1 100644 --- a/rocks-db/src/bin/column_remover/main.rs +++ b/rocks-db/src/bin/column_remover/main.rs @@ -1,13 +1,20 @@ use std::sync::Arc; +use entities::schedule::ScheduledJob; use rocks_db::asset::{ - self, AssetAuthorityDeprecated, AssetDynamicDetailsDeprecated, AssetOwnerDeprecated, - AssetStaticDetailsDeprecated, MetadataMintMap, + self, AssetAuthorityDeprecated, AssetCollectionDeprecated, AssetDynamicDetailsDeprecated, + AssetOwnerDeprecated, AssetStaticDetailsDeprecated, MetadataMintMap, }; +use rocks_db::asset_previews::{AssetPreviews, UrlToDownload}; +use rocks_db::batch_mint::BatchMintWithStaker; use rocks_db::column::TypedColumn; +use rocks_db::inscriptions::{Inscription, InscriptionData}; +use rocks_db::leaf_signatures::LeafSignature; +use rocks_db::token_prices::TokenPrice; use rocks_db::tree_seq::{TreeSeqIdx, TreesGaps}; use rocks_db::{ - cl_items, signature_client, AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, + bubblegum_slots, cl_items, parameters, signature_client, AssetAuthority, AssetDynamicDetails, + AssetOwner, AssetStaticDetails, }; use tokio::sync::Mutex; use tokio::task::JoinSet; @@ -18,11 +25,13 @@ use rustyline::error::ReadlineError; use rustyline::DefaultEditor; use entities::enums::TokenMetadataEdition; -use entities::models::{AssetSignature, TokenAccount}; +use entities::models::{ + AssetSignature, BatchMintToVerify, FailedBatchMint, RawBlock, SplMint, TokenAccount, +}; use metrics_utils::red::RequestErrorDurationMetrics; use rocks_db::migrator::MigrationState; use 
rocks_db::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; -use std::env; +use std::{env, option}; #[tokio::main(flavor = "multi_thread")] pub async fn main() -> Result<(), String> { @@ -38,30 +47,9 @@ pub async fn main() -> Result<(), String> { // Specify the column families you plan to remove let columns_to_remove = vec![ - AssetStaticDetails::NAME, - AssetDynamicDetails::NAME, - AssetAuthority::NAME, - AssetAuthorityDeprecated::NAME, - AssetOwnerDeprecated::NAME, - asset::AssetLeaf::NAME, - asset::AssetCollection::NAME, - asset::AssetCollectionDeprecated::NAME, - cl_items::ClItem::NAME, - cl_items::ClLeaf::NAME, - asset::AssetsUpdateIdx::NAME, - asset::SlotAssetIdx::NAME, - AssetOwner::NAME, - TreeSeqIdx::NAME, - signature_client::SignatureIdx::NAME, - AssetDynamicDetailsDeprecated::NAME, - MetadataMintMap::NAME, - TreesGaps::NAME, - TokenMetadataEdition::NAME, - AssetStaticDetailsDeprecated::NAME, - AssetSignature::NAME, - TokenAccount::NAME, - TokenAccountOwnerIdx::NAME, - TokenAccountMintOwnerIdx::NAME, + "BUBBLEGUM_SLOTS", // bubblegum_slots::BubblegumSlots::NAME, + "INGESTABLE_SLOTS", // bubblegum_slots::IngestableSlots::NAME, + RawBlock::NAME, ]; // Print the column families to be removed @@ -96,21 +84,12 @@ pub async fn main() -> Result<(), String> { fn remove_column_families(db_path: String, columns_to_remove: &[&str]) { let mut options = Options::default(); - options.create_if_missing(true); - options.create_missing_column_families(true); // Get the existing column families let cf_names = DB::list_cf(&options, &db_path).expect("Failed to list column families."); let red_metrics = Arc::new(RequestErrorDurationMetrics::new()); - let db = rocks_db::Storage::open( - &db_path, - Arc::new(Mutex::new(JoinSet::new())), - red_metrics.clone(), - MigrationState::Last, - ) - .expect("Failed to open DB."); - let db = db.db; + let db = DB::open_cf(&options, &db_path, &cf_names).expect("Failed to open DB."); columns_to_remove.iter().for_each(|cf_name| 
{ if !cf_names.contains(&cf_name.to_string()) { println!("Column family {} does not exist. Skipping it", cf_name); diff --git a/rocks-db/src/bin/leaf_checker/main.rs b/rocks-db/src/bin/leaf_checker/main.rs index 4d88ddeff..7527e35ab 100644 --- a/rocks-db/src/bin/leaf_checker/main.rs +++ b/rocks-db/src/bin/leaf_checker/main.rs @@ -135,27 +135,25 @@ async fn process_leaf(storage: Arc, data: Vec) -> Option<(Pubkey, u if !storage.cl_items.has_key(key).await.unwrap() { let asset_id = get_asset_id(&cl_leaf_data.cli_tree_key, &cl_leaf_data.cli_leaf_idx).await; + let asset_leaf_data_fut = storage.asset_leaf_data.batch_get(vec![asset_id]); + let asset_complete_data = storage.get_complete_asset_details(asset_id); + let asset_leaf_data = asset_leaf_data_fut.await; - let (asset_dynamic_data, asset_leaf_data) = tokio::join!( - storage.asset_dynamic_data.batch_get(vec![asset_id]), - storage.asset_leaf_data.batch_get(vec![asset_id]) - ); - - let asset_dynamic_data = asset_dynamic_data + let asset_dynamic_seq = asset_complete_data .ok() - .and_then(|vec| vec.into_iter().next()) .and_then(|data_opt| data_opt) + .and_then(|acd| acd.dynamic_details) .and_then(|data| data.seq.map(|s| s.value)) .unwrap_or_default(); - let asset_leaf_data = asset_leaf_data + let asset_leaf_seq = asset_leaf_data .ok() .and_then(|vec| vec.into_iter().next()) .and_then(|data_opt| data_opt) .and_then(|data| data.leaf_seq) .unwrap_or_default(); - let max_asset_sequence = std::cmp::max(asset_dynamic_data, asset_leaf_data); + let max_asset_sequence = std::cmp::max(asset_dynamic_seq, asset_leaf_seq); // if seq is 0 means asset does not exist at all // found a few such assets during testing, not sure how it happened yet diff --git a/rocks-db/src/bubblegum_slots.rs b/rocks-db/src/bubblegum_slots.rs index 8769865b2..a172aa629 100644 --- a/rocks-db/src/bubblegum_slots.rs +++ b/rocks-db/src/bubblegum_slots.rs @@ -1,133 +1,9 @@ -use std::collections::HashMap; -use std::sync::Arc; - -use async_trait::async_trait; -use 
interface::slots_dumper::SlotGetter; use serde::{Deserialize, Serialize}; use crate::column::TypedColumn; use crate::key_encoders::{decode_u64, encode_u64}; use crate::Result; -pub const BUBBLEGUM_SLOTS_PREFIX: &str = "s"; - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct BubblegumSlots {} - -impl TypedColumn for BubblegumSlots { - type KeyType = String; - type ValueType = Self; - const NAME: &'static str = "BUBBLEGUM_SLOTS"; - - fn encode_key(slot: String) -> Vec { - slot.into_bytes() - } - - fn decode_key(bytes: Vec) -> Result { - Ok(String::from_utf8(bytes).unwrap()) - } -} - -pub fn form_bubblegum_slots_key(slot: u64) -> String { - format!("{}{}", BUBBLEGUM_SLOTS_PREFIX, slot) -} - -pub fn bubblegum_slots_key_to_value(key: String) -> u64 { - key[BUBBLEGUM_SLOTS_PREFIX.len()..].parse::().unwrap() -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct IngestableSlots {} - -impl TypedColumn for IngestableSlots { - type KeyType = u64; - type ValueType = Self; - const NAME: &'static str = "INGESTABLE_SLOTS"; - - fn encode_key(slot: u64) -> Vec { - encode_u64(slot) - } - - fn decode_key(bytes: Vec) -> Result { - decode_u64(bytes) - } -} - -pub struct BubblegumSlotGetter { - rocks_client: Arc, -} - -impl BubblegumSlotGetter { - pub fn new(rocks_client: Arc) -> BubblegumSlotGetter { - BubblegumSlotGetter { rocks_client } - } -} - -#[async_trait] -impl SlotGetter for BubblegumSlotGetter { - fn get_unprocessed_slots_iter(&self) -> impl Iterator { - self.rocks_client - .bubblegum_slots - .iter_start() - .filter_map(|k| k.ok()) - .map(|(k, _)| String::from_utf8(k.to_vec())) - .filter_map(|k| k.ok()) - .map(bubblegum_slots_key_to_value) - } - - async fn mark_slots_processed( - &self, - slots: Vec, - ) -> core::result::Result<(), interface::error::StorageError> { - self.rocks_client - .ingestable_slots - .put_batch(slots.iter().fold(HashMap::new(), |mut acc, slot| { - acc.insert(*slot, IngestableSlots {}); - acc - })) - .await?; - self.rocks_client - 
.bubblegum_slots - .delete_batch(slots.iter().map(|k| form_bubblegum_slots_key(*k)).collect()) - .await?; - - Ok(()) - } -} - -pub struct IngestableSlotGetter { - rocks_client: Arc, -} - -impl IngestableSlotGetter { - pub fn new(rocks_client: Arc) -> IngestableSlotGetter { - IngestableSlotGetter { rocks_client } - } -} - -#[async_trait] -impl SlotGetter for IngestableSlotGetter { - fn get_unprocessed_slots_iter(&self) -> impl Iterator { - self.rocks_client - .ingestable_slots - .iter_start() - .filter_map(|k| k.ok()) - .map(|(k, _)| IngestableSlots::decode_key(k.to_vec())) - .filter_map(|k| k.ok()) - } - - async fn mark_slots_processed( - &self, - slots: Vec, - ) -> core::result::Result<(), interface::error::StorageError> { - self.rocks_client - .ingestable_slots - .delete_batch(slots.clone()) - .await?; - Ok(()) - } -} - #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ForceReingestableSlots {} diff --git a/rocks-db/src/cl_items.rs b/rocks-db/src/cl_items.rs index 60f4e0d10..c501bd5cf 100644 --- a/rocks-db/src/cl_items.rs +++ b/rocks-db/src/cl_items.rs @@ -8,13 +8,11 @@ use solana_sdk::pubkey::Pubkey; use spl_account_compression::events::ChangeLogEventV1; use tracing::{debug, error}; -use crate::asset::AssetLeaf; use crate::column::TypedColumn; -use crate::errors::StorageError; use crate::key_encoders::{decode_u64_pubkey, encode_u64_pubkey}; use crate::transaction::{CopyableChangeLogEventV1, TreeUpdate}; use crate::tree_seq::TreeSeqIdx; -use crate::{AssetDynamicDetails, Result, Storage}; +use crate::{Result, Storage}; /// This column family stores change log items for asset proof construction. /// Basically, it stores all nodes of the tree. 
@@ -274,44 +272,6 @@ impl Storage { error!("Error while saving tree update: {}", e); }; } - - pub(crate) fn save_tx_data_and_asset_updated_with_batch( - &self, - batch: &mut rocksdb::WriteBatch, - pk: Pubkey, - slot: u64, - leaf: &Option, - dynamic_data: &Option, - ) -> Result<()> { - if let Some(leaf) = leaf { - self.asset_leaf_data.merge_with_batch(batch, pk, leaf)? - }; - if let Some(dynamic_data) = dynamic_data { - self.asset_dynamic_data - .merge_with_batch(batch, pk, dynamic_data)?; - } - self.asset_updated_with_batch(batch, slot, pk)?; - Ok(()) - } - - pub async fn save_tx_data_and_asset_updated( - &self, - pk: Pubkey, - slot: u64, - leaf: &Option, - dynamic_data: &Option, - ) -> Result<()> { - let mut batch = rocksdb::WriteBatchWithTransaction::::default(); - self.save_tx_data_and_asset_updated_with_batch(&mut batch, pk, slot, leaf, dynamic_data)?; - let backend = self.db.clone(); - tokio::task::spawn_blocking(move || { - backend - .write(batch) - .map_err(|e| StorageError::Common(e.to_string())) - }) - .await - .map_err(|e| StorageError::Common(e.to_string()))? - } } fn node_idx_to_leaf_idx(index: u64, tree_height: u32) -> u64 { diff --git a/rocks-db/src/column.rs b/rocks-db/src/column.rs index e4b154aa0..9cd1e5d54 100644 --- a/rocks-db/src/column.rs +++ b/rocks-db/src/column.rs @@ -377,6 +377,18 @@ where .collect::>() } + pub fn pairs_iterator<'a>( + &self, + it: impl Iterator, Box<[u8]>), rocksdb::Error>> + 'a, + ) -> impl Iterator + 'a { + it.filter_map(|r| r.ok()) + .filter_map(|(key_bytes, val_bytes)| { + let k_op = C::decode_key(key_bytes.to_vec()).ok(); + let v_op = deserialize::(&val_bytes).ok(); + k_op.zip(v_op) + }) + } + /// Fetches maximum given amount of records from the beginning of the column family. 
/// ## Args: /// * `num` - desired amount of records to fetch diff --git a/rocks-db/src/dump_client.rs b/rocks-db/src/dump_client.rs index 5ac865163..f76841e04 100644 --- a/rocks-db/src/dump_client.rs +++ b/rocks-db/src/dump_client.rs @@ -1,35 +1,29 @@ -use crate::{ - column::Column, - key_encoders::decode_pubkey, - storage_traits::{AssetIndexReader, Dumper}, - AssetStaticDetails, Storage, -}; +use crate::asset::MplCoreCollectionAuthority; +use crate::asset_generated::asset as fb; +use crate::column::TypedColumn; +use crate::{column::Column, storage_traits::Dumper, Storage}; use async_trait::async_trait; -use bincode::deserialize; use csv::WriterBuilder; +use entities::models::SplMint; use entities::{ enums::{OwnerType, RoyaltyTargetType, SpecificationAssetClass, SpecificationVersions}, - models::{AssetIndex, TokenAccount}, + models::{TokenAccount, UrlWithStatus}, }; use hex; use inflector::Inflector; use metrics_utils::SynchronizerMetricsConfig; use serde::{Serialize, Serializer}; -use solana_sdk::pubkey::Pubkey; use std::{ collections::{HashMap, HashSet}, fs::File, io::BufWriter, sync::Arc, }; -use tokio::{sync::broadcast, task::JoinSet, time::Instant}; -use tokio::{sync::mpsc, task::JoinError}; +use tokio::time::Instant; use tracing::{error, info}; -use usecase::graceful_stop::graceful_stop; -const MPSC_BUFFER_SIZE: usize = 1_000_000; +const BUF_CAPACITY: usize = 1024 * 1024 * 32; -const ONE_G: usize = 1024 * 1024 * 1024; fn serialize_as_snake_case(value: &T, serializer: S) -> Result where S: Serializer, @@ -103,182 +97,257 @@ impl Storage { rx: &tokio::sync::broadcast::Receiver<()>, synchronizer_metrics: Arc, ) -> Result<(), String> { - let mut iterator_tasks = JoinSet::new(); - let mut writer_tasks = JoinSet::new(); - - let (iterator_shutdown_tx, iterator_shutdown_rx) = broadcast::channel::<()>(1); - let (writer_shutdown_tx, writer_shutdown_rx) = broadcast::channel::<()>(1); - - let (tx_indexes, rx_indexes) = async_channel::unbounded(); - - // Launch async 
tokio task for each worker which writes data to csv file. - // As a result they process every type of data independently. - let (tx_metadata, rx_metadata) = mpsc::channel(MPSC_BUFFER_SIZE); - let rx_cloned = rx.resubscribe(); - let shutdown_cloned = writer_shutdown_rx.resubscribe(); - - let cloned_metrics = synchronizer_metrics.clone(); - writer_tasks.spawn_blocking(move || { - Self::write_to_file( - metadata_file_and_path, - rx_cloned, - shutdown_cloned, - rx_metadata, - cloned_metrics, - ) - }); - - let (tx_assets, rx_assets) = mpsc::channel(MPSC_BUFFER_SIZE); - let rx_cloned = rx.resubscribe(); - let shutdown_cloned = writer_shutdown_rx.resubscribe(); - - let cloned_metrics = synchronizer_metrics.clone(); - writer_tasks.spawn_blocking(move || { - Self::write_to_file( - assets_file_and_path, - rx_cloned, - shutdown_cloned, - rx_assets, - cloned_metrics, - ) - }); - - let (tx_creators, rx_creators) = mpsc::channel(MPSC_BUFFER_SIZE); - let rx_cloned = rx.resubscribe(); - let shutdown_cloned = writer_shutdown_rx.resubscribe(); - - let cloned_metrics = synchronizer_metrics.clone(); - writer_tasks.spawn_blocking(move || { - Self::write_to_file( - creators_file_and_path, - rx_cloned, - shutdown_cloned, - rx_creators, - cloned_metrics, - ) - }); - - let (tx_authority, rx_authority) = mpsc::channel(MPSC_BUFFER_SIZE); - let rx_cloned = rx.resubscribe(); - let shutdown_cloned = writer_shutdown_rx.resubscribe(); + let mut core_collections: HashMap, Vec> = HashMap::new(); + let mut core_collections_iter = self + .db + .raw_iterator_cf(&self.mpl_core_collection_authorities.handle()); + core_collections_iter.seek_to_first(); + while core_collections_iter.valid() { + let key = core_collections_iter.key().unwrap(); + let value = core_collections_iter.value().unwrap(); + if let Ok(value) = bincode::deserialize::(value) { + if let Some(authority) = value.authority.value { + core_collections.insert(key.to_vec(), authority.to_bytes().to_vec()); + } + } + 
core_collections_iter.next(); + } - let cloned_metrics = synchronizer_metrics.clone(); - writer_tasks.spawn_blocking(move || { - Self::write_to_file( - authority_file_and_path, - rx_cloned, - shutdown_cloned, - rx_authority, - cloned_metrics, - ) - }); + let mut metadata_key_set = HashSet::new(); + let mut authorities_key_set = HashSet::new(); - let rx_cloned = rx.resubscribe(); - let shutdown_cloned = iterator_shutdown_rx.resubscribe(); - iterator_tasks.spawn(Self::iterate_over_indexes( - rx_cloned, - shutdown_cloned, - rx_indexes, - tx_metadata, - tx_creators, - tx_assets, - tx_authority, - synchronizer_metrics.clone(), - )); + let buf_writer = BufWriter::with_capacity(BUF_CAPACITY, assets_file_and_path.0); - let core_collection_keys: Vec = self - .asset_static_data - .iter_start() - .filter_map(|a| a.ok()) - .filter_map(|(_, v)| deserialize::(v.to_vec().as_ref()).ok()) - .filter(|a| a.specification_asset_class == SpecificationAssetClass::MplCoreCollection) - .map(|a| a.pubkey) - .collect(); + let mut asset_writer = WriterBuilder::new() + .has_headers(false) + .from_writer(buf_writer); - let collections: HashMap = self - .asset_collection_data - .batch_get(core_collection_keys) - .await - .unwrap_or_default() - .into_iter() - .flatten() - .filter_map(|asset| asset.authority.value.map(|v| (asset.pubkey, v))) - .collect(); + let buf_writer = BufWriter::with_capacity(BUF_CAPACITY, authority_file_and_path.0); + let mut authority_writer = WriterBuilder::new() + .has_headers(false) + .from_writer(buf_writer); + let buf_writer = BufWriter::with_capacity(BUF_CAPACITY, creators_file_and_path.0); + let mut creators_writer = WriterBuilder::new() + .has_headers(false) + .from_writer(buf_writer); - // Iteration over `asset_static_data` column via CUSTOM iterator. - let iter = self.asset_static_data.iter_start(); - let mut batch = Vec::with_capacity(batch_size); - // Collect batch of keys. 
- for k in iter - .filter_map(|k| k.ok()) - .filter_map(|(key, _)| decode_pubkey(key.to_vec()).ok()) - { - synchronizer_metrics.inc_num_of_assets_iter("asset_static_data", 1); - batch.push(k); - // When batch is filled, find `AssetIndex` and send it to `tx_indexes` channel. - if batch.len() >= batch_size { - let start = chrono::Utc::now(); - let indexes = self - .get_nft_asset_indexes(batch.as_ref(), Some(&collections)) - .await - .map_err(|e| e.to_string())?; - self.red_metrics.observe_request( - "Synchronizer", - "get_batch_of_assets", - "get_asset_indexes", - start, - ); + let buf_writer = BufWriter::with_capacity(BUF_CAPACITY, metadata_file_and_path.0); + let mut metadata_writer = WriterBuilder::new() + .has_headers(false) + .from_writer(buf_writer); - tx_indexes - .send(indexes) - .await - .map_err(|e| format!("Error sending asset indexes to channel: {}", e))?; + // Iteration over `asset_data` column via CUSTOM iterator. + let mut iter = self.db.raw_iterator_cf(&self.asset_data.handle()); + iter.seek_to_first(); + while iter.valid() { + let key = iter.key().unwrap(); + let encoded_key = Self::encode(key); + let value = iter.value().unwrap(); + let asset; + unsafe { + asset = fb::root_as_asset_complete_details_unchecked(value); + } + if asset.static_details().is_none() { + iter.next(); + continue; + } + // this will slow down the sync, but it is necessary to determine if the asset is an NFT + // TODO: optimize this + let mut sac: SpecificationAssetClass = asset + .static_details() + .map(|static_details| static_details.specification_asset_class().into()) + .unwrap_or_default(); + if sac == SpecificationAssetClass::Unknown + || sac == SpecificationAssetClass::FungibleToken + || sac == SpecificationAssetClass::FungibleAsset + { + // get the spl token account and check its supply and decimals + // if those are 1 and 0, then it is not a fungible asset, but an NFT + let ta = self + .db + .get_cf(&self.db.cf_handle(SplMint::NAME).unwrap(), key); + if let Ok(Some(ta)) 
= ta { + if let Ok(ta) = bincode::deserialize::(&ta) { + if ta.is_nft() { + sac = SpecificationAssetClass::Nft; + } + } + } + } + let metadata_url = asset + .dynamic_details() + .and_then(|dd| dd.url()) + .and_then(|url| url.value()) + .filter(|s| !s.is_empty()) + .map(|s| (UrlWithStatus::get_metadata_id_for(s), s)); + if let Some((ref metadata_key, ref url)) = metadata_url { + { + if !metadata_key_set.contains(metadata_key) { + metadata_key_set.insert(metadata_key.clone()); + if let Err(e) = metadata_writer.serialize(( + Self::encode(metadata_key), + url.to_string(), + "pending".to_string(), + )) { + error!("Error writing metadata to csv: {:?}", e); + } + synchronizer_metrics.inc_num_of_records_written("metadata", 1); + } + } + } - // Clearing batch vector to continue iterating and collecting new batch. - batch.clear(); + let slot_updated = asset.get_slot_updated() as i64; + if let Some(cc) = asset + .dynamic_details() + .and_then(|d| d.creators()) + .and_then(|u| u.value()) + { + for creator in cc { + let c_key = creator.creator().unwrap().bytes(); + if let Err(e) = creators_writer.serialize(( + encoded_key.clone(), + Self::encode(c_key), + creator.creator_verified(), + slot_updated, + )) { + error!("Error writing creator to csv: {:?}", e); + } + synchronizer_metrics.inc_num_of_records_written("creators", 1); + } + } + let update_authority = asset + .collection() + .and_then(|c| c.collection()) + .and_then(|c| c.value()) + .and_then(|c| core_collections.get(c.bytes())) + .map(|b| b.to_owned()); + let authority = asset + .authority() + .and_then(|a| a.authority()) + .map(|a| a.bytes().to_vec()); + let collection = asset + .collection() + .and_then(|c| c.collection()) + .and_then(|uc| uc.value()) + .map(|c| Self::encode(c.bytes())); + let record = AssetRecord { + ast_pubkey: encoded_key.clone(), + ast_specification_version: SpecificationVersions::V1, + ast_specification_asset_class: sac, + ast_royalty_target_type: asset + .static_details() + .map(|static_details| 
static_details.royalty_target_type().into()) + .unwrap_or_default(), + ast_royalty_amount: asset + .dynamic_details() + .and_then(|d| d.royalty_amount()) + .map(|ra| ra.value()) + .unwrap_or_default() as i64, + ast_slot_created: asset + .static_details() + .map(|static_details| static_details.created_at()) + .unwrap_or_default(), + ast_owner_type: asset + .owner() + .and_then(|o| o.owner_type().map(|o| OwnerType::from(o.value()))), + ast_owner: asset + .owner() + .and_then(|o| o.owner()) + .and_then(|o| o.value()) + .map(|v| v.bytes()) + .map(Self::encode), + ast_delegate: asset + .owner() + .and_then(|o| o.delegate()) + .and_then(|o| o.value()) + .map(|v| v.bytes()) + .map(Self::encode), + ast_authority_fk: if let Some(collection) = collection.as_ref() { + if update_authority.is_some() { + Some(collection.to_owned()) + } else if authority.is_some() { + Some(encoded_key.clone()) + } else { + None + } + } else if authority.is_some() { + Some(encoded_key.clone()) + } else { + None + }, + ast_collection: collection.clone(), + ast_is_collection_verified: asset + .collection() + .and_then(|c| c.is_collection_verified()) + .map(|v| v.value()), + ast_is_burnt: asset + .dynamic_details() + .and_then(|d| d.is_burnt()) + .map(|v| v.value()) + .unwrap_or_default(), + ast_is_compressible: asset + .dynamic_details() + .and_then(|d| d.is_compressible()) + .map(|v| v.value()) + .unwrap_or_default(), + ast_is_compressed: asset + .dynamic_details() + .and_then(|d| d.is_compressed()) + .map(|v| v.value()) + .unwrap_or_default(), + ast_is_frozen: asset + .dynamic_details() + .and_then(|d| d.is_frozen()) + .map(|v| v.value()) + .unwrap_or_default(), + ast_supply: asset + .dynamic_details() + .and_then(|d| d.supply()) + .map(|v| v.value() as i64), + ast_metadata_url_id: metadata_url.map(|(k, _)| k).map(Self::encode), + ast_slot_updated: slot_updated, + }; + + if let Err(e) = asset_writer.serialize(record) { + error!("Error writing asset to csv: {:?}", e); + } + 
synchronizer_metrics.inc_num_of_records_written("asset", 1); + let authority_key = if update_authority.is_some() { + collection + } else { + Some(encoded_key) + }; + let authority = update_authority.or(authority); + if let (Some(authority_key), Some(authority)) = (authority_key, authority) { + { + if !authorities_key_set.contains(&authority_key) { + authorities_key_set.insert(authority_key.clone()); + if let Err(e) = authority_writer.serialize(( + authority_key, + Self::encode(authority), + slot_updated, + )) { + error!("Error writing authority to csv: {:?}", e); + } + synchronizer_metrics.inc_num_of_records_written("authority", 1); + } + } } if !rx.is_empty() { return Err("dump cancelled".to_string()); } + iter.next(); + synchronizer_metrics.inc_num_of_assets_iter("asset", 1); } + _ = tokio::try_join!( + tokio::task::spawn_blocking(move || asset_writer.flush()), + tokio::task::spawn_blocking(move || authority_writer.flush()), + tokio::task::spawn_blocking(move || creators_writer.flush()), + tokio::task::spawn_blocking(move || metadata_writer.flush()) + ) + .map_err(|e| e.to_string())?; - // If there are any records left, we find the `AssetIndex` and send them to the `tx_indexes` channel. - if !batch.is_empty() { - let start = chrono::Utc::now(); - let indexes = self - .get_nft_asset_indexes(batch.as_ref(), Some(&collections)) - .await - .map_err(|e| e.to_string())?; - self.red_metrics.observe_request( - "Synchronizer", - "get_batch_of_assets", - "get_asset_indexes", - start, - ); - - tx_indexes - .send(indexes) - .await - .map_err(|e| format!("Error sending asset indexes to channel: {}", e))?; - } - - // Once we iterate through all the assets in RocksDB we have to send stop signal - // to iterators and wait until they finish its job. - // Because that workers populate channel for writers. 
- iterator_shutdown_tx - .send(()) - .map_err(|e| format!("Error sending stop signal for indexes iterator: {}", e))?; - info!("Stopping iterators..."); - graceful_stop(&mut iterator_tasks).await; - info!("All iterators are stopped."); - - // Once iterators are stopped it's safe to shut down writers. - writer_shutdown_tx - .send(()) - .map_err(|e| format!("Error sending stop signal for file writers: {}", e))?; - info!("Stopping writers..."); - graceful_stop(&mut writer_tasks).await; - info!("All writers are stopped."); + info!("asset writers are flushed."); Ok(()) } @@ -300,27 +369,23 @@ impl Storage { ) -> Result<(), String> { let column: Column = Self::column(self.db.clone(), self.red_metrics.clone()); - let buf_writer = BufWriter::with_capacity(ONE_G, fungible_tokens_file_and_path.0); + let buf_writer = BufWriter::with_capacity(BUF_CAPACITY, fungible_tokens_file_and_path.0); let mut writer = WriterBuilder::new() .has_headers(false) .from_writer(buf_writer); - // token_acc_key, owner, mint, balance, slot updated + // asset, owner, balance, slot updated let mut batch: Vec<(String, String, String, i64, i64)> = Vec::new(); - for token in column - .iter_start() - .filter_map(|k| k.ok()) - .filter_map(|(_, value)| deserialize::(value.to_vec().as_ref()).ok()) - { + for (_, token) in column.pairs_iterator(column.iter_start()) { if !rx.is_empty() { info!("Shutdown signal received..."); return Ok(()); } batch.push(( - token.pubkey.to_string(), - token.owner.to_string(), - token.mint.to_string(), + Self::encode(token.pubkey), + Self::encode(token.owner), + Self::encode(token.mint), token.amount, token.slot_updated, )); @@ -347,6 +412,7 @@ impl Storage { } if !batch.is_empty() { + let start = Instant::now(); for rec in &batch { if !rx.is_empty() { info!("Shutdown signal received..."); @@ -361,6 +427,13 @@ impl Storage { return Err(msg); } } + + synchronizer_metrics.set_file_write_time( + fungible_tokens_file_and_path.1.as_ref(), + start.elapsed().as_millis() as f64, + ); + 
synchronizer_metrics + .inc_num_of_records_written(&fungible_tokens_file_and_path.1, batch.len() as u64); batch.clear(); } @@ -377,218 +450,6 @@ impl Storage { Ok(()) } - /// The `iterate_over_indexes` function is an asynchronous method responsible for iterating over a stream of asset indexes and processing them. - /// It extracts `metadata`, `creators`, `assets`, and `authority` information from each index and sends this data to channels for further processing. - /// The function listens to shut down signals to gracefully stop its operations. - #[allow(clippy::too_many_arguments)] - async fn iterate_over_indexes( - rx_cloned: tokio::sync::broadcast::Receiver<()>, - shutdown_cloned: tokio::sync::broadcast::Receiver<()>, - rx_indexes_cloned: async_channel::Receiver>, - tx_metadata_cloned: tokio::sync::mpsc::Sender<(String, String, String)>, - tx_creators_cloned: tokio::sync::mpsc::Sender<(String, String, bool, i64)>, - tx_assets_cloned: tokio::sync::mpsc::Sender, - tx_authority_cloned: tokio::sync::mpsc::Sender<(String, String, i64)>, - synchronizer_metrics: Arc, - ) -> Result<(), JoinError> { - let mut metadata_key_set = HashSet::new(); - let mut authorities_key_set = HashSet::new(); - - loop { - // whole application is stopped - if !rx_cloned.is_empty() { - break; - } - // process with data collection stopped - if !shutdown_cloned.is_empty() && rx_indexes_cloned.is_empty() { - break; - } - - if rx_indexes_cloned.is_empty() { - continue; - } else if let Ok(indexes) = rx_indexes_cloned.try_recv() { - let start = Instant::now(); - for (key, index) in indexes { - let metadata_url = index - .metadata_url - .map(|url| (url.get_metadata_id(), url.metadata_url.trim().to_owned())); - if let Some((ref metadata_key, ref url)) = metadata_url { - { - if !metadata_key_set.contains(metadata_key) { - metadata_key_set.insert(metadata_key.clone()); - if let Err(e) = tx_metadata_cloned - .send(( - Self::encode(metadata_key), - url.to_string(), - "pending".to_string(), - )) - .await 
- { - error!("Error sending message: {:?}", e); - } - synchronizer_metrics - .inc_num_of_records_sent_to_channel("metadata", 1); - } - } - } - for creator in index.creators { - if let Err(e) = tx_creators_cloned - .send(( - Self::encode(key.to_bytes()), - Self::encode(creator.creator), - creator.creator_verified, - index.slot_updated, - )) - .await - { - error!("Error sending message: {:?}", e); - } - synchronizer_metrics.inc_num_of_records_sent_to_channel("creators", 1); - } - let record = AssetRecord { - ast_pubkey: Self::encode(key.to_bytes()), - ast_specification_version: index.specification_version, - ast_specification_asset_class: index.specification_asset_class, - ast_royalty_target_type: index.royalty_target_type, - ast_royalty_amount: index.royalty_amount, - ast_slot_created: index.slot_created, - ast_owner_type: index.owner_type, - ast_owner: index.owner.map(Self::encode), - ast_delegate: index.delegate.map(Self::encode), - ast_authority_fk: if let Some(collection) = index.collection { - if index.update_authority.is_some() { - Some(Self::encode(collection)) - } else if index.authority.is_some() { - Some(Self::encode(index.pubkey)) - } else { - None - } - } else if index.authority.is_some() { - Some(Self::encode(index.pubkey)) - } else { - None - }, - ast_collection: index.collection.map(Self::encode), - ast_is_collection_verified: index.is_collection_verified, - ast_is_burnt: index.is_burnt, - ast_is_compressible: index.is_compressible, - ast_is_compressed: index.is_compressed, - ast_is_frozen: index.is_frozen, - ast_supply: index.supply, - ast_metadata_url_id: metadata_url.map(|(k, _)| k).map(Self::encode), - ast_slot_updated: index.slot_updated, - }; - if let Err(e) = tx_assets_cloned.send(record).await { - error!("Error sending message: {:?}", e); - } - let authority = index.update_authority.or(index.authority); - let authority_key = if index.update_authority.is_some() { - index.collection - } else { - Some(key) - }; - if let (Some(authority_key), 
Some(authority)) = (authority_key, authority) { - { - if !authorities_key_set.contains(&authority_key) { - authorities_key_set.insert(authority_key); - if let Err(e) = tx_authority_cloned - .send(( - Self::encode(authority_key.to_bytes()), - Self::encode(authority.to_bytes()), - index.slot_updated, - )) - .await - { - error!("Error sending message: {:?}", e); - } - synchronizer_metrics - .inc_num_of_records_sent_to_channel("authority", 1); - } - } - } - } - - synchronizer_metrics - .set_iter_over_assets_indexes(start.elapsed().as_millis() as f64); - } - } - - Ok(()) - } - - /// The `write_to_file` function is an asynchronous method responsible for writing - /// serialized data to a file using a buffered writer. - /// It listens for data from a `tokio::sync::mpsc::Receiver` channel, - /// and the writing process is controlled by two shutdown signals: one for the application and one for the worker. - /// - /// # Args: - /// `file_and_path` - A tuple containing: - /// A File object used for writing the serialized data. - /// A String representing the file path (for logging and debugging purposes). - /// - /// `application_shutdown` - A `broadcast::Receiver` channel that listens for an application-wide shutdown signal. - /// If this signal is received, the loop will terminate, and writing will stop. - /// - /// `worker_shutdown` - A `broadcast::Receiver` channel that listens for a worker-specific shutdown signal. - /// Writing will stop if both the worker shutdown signal is received and there is no more data to process in the `data_channel`. - /// - /// `data_channel` - An `mpsc::Receiver` channel that provides the serialized data (`T: Serialize`) to be written to the file. - /// Data is processed in the loop until one of the shutdown signals is triggered. 
- fn write_to_file( - file_and_path: (File, String), - application_shutdown: tokio::sync::broadcast::Receiver<()>, - worker_shutdown: tokio::sync::broadcast::Receiver<()>, - mut data_channel: tokio::sync::mpsc::Receiver, - synchronizer_metrics: Arc, - ) -> Result<(), JoinError> { - let buf_writer = BufWriter::with_capacity(ONE_G, file_and_path.0); - let mut writer = WriterBuilder::new() - .has_headers(false) - .from_writer(buf_writer); - - loop { - if !application_shutdown.is_empty() { - break; - } - - if !worker_shutdown.is_empty() && data_channel.is_empty() { - break; - } - - if data_channel.is_empty() { - continue; - } else if let Ok(k) = data_channel.try_recv() { - let start = Instant::now(); - - if let Err(e) = writer.serialize(k).map_err(|e| e.to_string()) { - error!( - "Error while writing data into {:?}. Err: {:?}", - file_and_path.1, e - ); - } - - synchronizer_metrics.set_file_write_time( - file_and_path.1.as_ref(), - start.elapsed().as_millis() as f64, - ); - synchronizer_metrics.inc_num_of_records_written(&file_and_path.1, 1); - } - } - - let start = Instant::now(); - if let Err(e) = writer.flush().map_err(|e| e.to_string()) { - error!( - "Error happened during flushing data to {:?}. 
Err: {:?}", - file_and_path.1, e - ); - } - synchronizer_metrics - .set_file_write_time(file_and_path.1.as_ref(), start.elapsed().as_millis() as f64); - synchronizer_metrics.inc_num_of_records_written(&file_and_path.1, 1); - - Ok(()) - } - fn encode>(v: T) -> String { format!("\\x{}", hex::encode(v)) } diff --git a/rocks-db/src/editions.rs b/rocks-db/src/editions.rs index dac6aeb10..3055b9ed4 100644 --- a/rocks-db/src/editions.rs +++ b/rocks-db/src/editions.rs @@ -50,13 +50,13 @@ pub fn merge_token_metadata_edition( for op in operands { match serde_cbor::from_slice(op) { Ok(TokenMetadataEdition::MasterEdition(new_val)) => { - if new_val.write_version > write_version { + if new_val.write_version > write_version || result.is_empty() { write_version = new_val.write_version; result = op.to_vec(); } } Ok(TokenMetadataEdition::EditionV1(new_val)) => { - if new_val.write_version > write_version { + if new_val.write_version > write_version || result.is_empty() { write_version = new_val.write_version; result = op.to_vec(); } diff --git a/rocks-db/src/flatbuf/asset.fbs b/rocks-db/src/flatbuf/asset.fbs new file mode 100644 index 000000000..c039512ea --- /dev/null +++ b/rocks-db/src/flatbuf/asset.fbs @@ -0,0 +1,178 @@ +// asset.fbs +namespace Asset; + +enum SpecificationAssetClass : byte { + Unknown = 0, + FungibleToken, + FungibleAsset, + Nft, + PrintableNft, + ProgrammableNft, + Print, + TransferRestrictedNft, + NonTransferableNft, + IdentityNft, + MplCoreAsset, + MplCoreCollection +} + +enum RoyaltyTargetType : byte { + Unknown = 0, + Creators, + Fanout, + Single +} + +enum OwnerType : byte { + Unknown = 0, + Token, + Single +} + +enum ChainMutability : byte { + Immutable = 0, + Mutable +} + +enum UpdateVersionType : byte { + None = 0, + Sequence, + WriteVersion +} + +table UpdateVersion { + version_type: UpdateVersionType; + version_value: ulong; +} + +table UpdatedBool { + slot_updated: ulong; + update_version: UpdateVersion; + value: bool; +} + +table UpdatedU64 { + 
slot_updated: ulong; + update_version: UpdateVersion; + value: ulong; +} + +table UpdatedU32 { + slot_updated: ulong; + update_version: UpdateVersion; + value: uint; +} + +table UpdatedString { + slot_updated: ulong; + update_version: UpdateVersion; + value: string; +} + +table UpdatedPubkey { + slot_updated: ulong; + update_version: UpdateVersion; + value: [ubyte]; // 32 bytes +} + +table UpdatedOptionalPubkey { + slot_updated: ulong; + update_version: UpdateVersion; + value: [ubyte]; // 32 bytes, optional +} + +table UpdatedCreators { + slot_updated: ulong; + update_version: UpdateVersion; + value: [Creator]; +} + +table UpdatedChainMutability { + slot_updated: ulong; + update_version: UpdateVersion; + value: ChainMutability; +} + +table UpdatedOwnerType { + slot_updated: ulong; + update_version: UpdateVersion; + value: OwnerType; +} + +table Creator { + creator: [ubyte]; // Pubkey (32 bytes) + creator_verified: bool; + creator_share: uint; // Use uint to represent u8 +} + +table AssetStaticDetails { + pubkey: [ubyte]; // Pubkey (32 bytes) + specification_asset_class: SpecificationAssetClass; + royalty_target_type: RoyaltyTargetType; + created_at: long; + edition_address: [ubyte]; // Optional Pubkey (32 bytes) +} + +table AssetDynamicDetails { + pubkey: [ubyte]; // Pubkey (32 bytes) + is_compressible: UpdatedBool; + is_compressed: UpdatedBool; + is_frozen: UpdatedBool; + supply: UpdatedU64; + seq: UpdatedU64; + is_burnt: UpdatedBool; + was_decompressed: UpdatedBool; + onchain_data: UpdatedString; + creators: UpdatedCreators; + royalty_amount: UpdatedU32; + url: UpdatedString; + chain_mutability: UpdatedChainMutability; + lamports: UpdatedU64; + executable: UpdatedBool; + metadata_owner: UpdatedString; + raw_name: UpdatedString; + mpl_core_plugins: UpdatedString; + mpl_core_unknown_plugins: UpdatedString; + rent_epoch: UpdatedU64; + num_minted: UpdatedU32; + current_size: UpdatedU32; + plugins_json_version: UpdatedU32; + mpl_core_external_plugins: UpdatedString; + 
mpl_core_unknown_external_plugins: UpdatedString; + mint_extensions: UpdatedString; +} + +table AssetAuthority { + pubkey: [ubyte]; // Pubkey (32 bytes) + authority: [ubyte]; // Pubkey (32 bytes) + slot_updated: ulong; + write_version: ulong; +} + +table AssetOwner { + pubkey: [ubyte]; // Pubkey (32 bytes) + owner: UpdatedOptionalPubkey; + delegate: UpdatedOptionalPubkey; + owner_type: UpdatedOwnerType; + owner_delegate_seq: UpdatedU64; + is_current_owner: UpdatedBool; +} + +table AssetCollection { + pubkey: [ubyte]; // Pubkey (32 bytes) + collection: UpdatedPubkey; + is_collection_verified: UpdatedBool; + authority: UpdatedOptionalPubkey; +} + +table AssetCompleteDetails { + pubkey: [ubyte]; // Pubkey (32 bytes) + static_details: AssetStaticDetails; + dynamic_details: AssetDynamicDetails; + authority: AssetAuthority; + owner: AssetOwner; + collection: AssetCollection; + other_known_owners: [AssetOwner]; +} + +root_type AssetCompleteDetails; diff --git a/rocks-db/src/fork_cleaner.rs b/rocks-db/src/fork_cleaner.rs index ffa87ee66..8c5e7f3e4 100644 --- a/rocks-db/src/fork_cleaner.rs +++ b/rocks-db/src/fork_cleaner.rs @@ -1,10 +1,12 @@ use crate::{ - cl_items::ClItemKey, column::TypedColumn, leaf_signatures::LeafSignature, Storage, DROP_ACTION, - FULL_ITERATION_ACTION, ITERATOR_TOP_ACTION, RAW_BLOCKS_CBOR_ENDPOINT, ROCKS_COMPONENT, + cl_items::ClItemKey, column::TypedColumn, leaf_signatures::LeafSignature, SlotStorage, Storage, + DROP_ACTION, FULL_ITERATION_ACTION, ITERATOR_TOP_ACTION, RAW_BLOCKS_CBOR_ENDPOINT, + ROCKS_COMPONENT, }; use async_trait::async_trait; -use entities::models::{ClItem, ForkedItem, LeafSignatureAllData}; +use entities::models::{ClItem, ForkedItem, LeafSignatureAllData, RawBlock}; use interface::fork_cleaner::{CompressedTreeChangesManager, ForkChecker}; +use rocksdb::IteratorMode; use solana_sdk::{pubkey::Pubkey, signature::Signature}; use std::collections::HashSet; use tokio::sync::broadcast::Receiver; @@ -94,11 +96,18 @@ impl 
CompressedTreeChangesManager for Storage { } #[async_trait] -impl ForkChecker for Storage { +impl ForkChecker for SlotStorage { fn get_all_non_forked_slots(&self, rx: Receiver<()>) -> HashSet { let start_time = chrono::Utc::now(); let mut all_keys = HashSet::new(); - for (key, _) in self.raw_blocks_cbor.iter_start().filter_map(Result::ok) { + for (key, _) in self + .db + .full_iterator_cf( + &self.db.cf_handle(RawBlock::NAME).unwrap(), + IteratorMode::Start, + ) + .filter_map(Result::ok) + { if !rx.is_empty() { info!("Stop iteration over raw_blocks_cbor iterator..."); return all_keys; @@ -123,21 +132,19 @@ impl ForkChecker for Storage { fn last_slot_for_check(&self) -> u64 { let start_time = chrono::Utc::now(); - for (key, _) in self.raw_blocks_cbor.iter_end().filter_map(Result::ok) { - match crate::key_encoders::decode_u64(key.to_vec()) { - Ok(key) => { - self.red_metrics.observe_request( - ROCKS_COMPONENT, - ITERATOR_TOP_ACTION, - RAW_BLOCKS_CBOR_ENDPOINT, - start_time, - ); - return key; - } - Err(e) => { - error!("Decode raw block key: {}", e); - } - }; + let mut it = self + .db + .raw_iterator_cf(&self.db.cf_handle(RawBlock::NAME).unwrap()); + it.seek_to_last(); + if !it.valid() { + self.red_metrics.observe_request( + ROCKS_COMPONENT, + ITERATOR_TOP_ACTION, + RAW_BLOCKS_CBOR_ENDPOINT, + start_time, + ); + // if there are no saved blocks - we can not do any checks + return 0; } self.red_metrics.observe_request( ROCKS_COMPONENT, @@ -145,7 +152,8 @@ impl ForkChecker for Storage { RAW_BLOCKS_CBOR_ENDPOINT, start_time, ); - // if there no saved block - we cannot do any check - 0 + it.key() + .and_then(|b| RawBlock::decode_key(b.to_vec()).ok()) + .unwrap_or_default() } } diff --git a/rocks-db/src/lib.rs b/rocks-db/src/lib.rs index 81879d966..038718719 100644 --- a/rocks-db/src/lib.rs +++ b/rocks-db/src/lib.rs @@ -2,16 +2,17 @@ use asset_previews::{AssetPreviews, UrlToDownload}; use entities::schedule::ScheduledJob; use inflector::Inflector; use 
leaf_signatures::LeafSignature; +use std::path::Path; use std::sync::atomic::AtomicU64; use std::{marker::PhantomData, sync::Arc}; use asset::{ - AssetAuthorityDeprecated, AssetCollectionDeprecated, AssetOwnerDeprecated, - FungibleAssetsUpdateIdx, MetadataMintMap, SlotAssetIdx, + AssetAuthorityDeprecated, AssetCollectionDeprecated, AssetCompleteDetails, + AssetDynamicDetailsDeprecated, AssetOwnerDeprecated, AssetStaticDetailsDeprecated, + FungibleAssetsUpdateIdx, MetadataMintMap, MplCoreCollectionAuthority, SlotAssetIdx, }; -use rocksdb::{ColumnFamilyDescriptor, Options, DB}; +use rocksdb::{ColumnFamilyDescriptor, IteratorMode, Options, DB}; -use crate::asset::{AssetDynamicDetailsDeprecated, AssetStaticDetailsDeprecated}; use crate::migrator::{MigrationState, MigrationVersions, RocksMigration}; pub use asset::{ AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, AssetsUpdateIdx, @@ -33,11 +34,8 @@ use crate::migrations::clean_update_authorities::CleanCollectionAuthoritiesMigra use crate::migrations::collection_authority::{ AssetCollectionVersion0, CollectionAuthorityMigration, }; -use crate::migrations::external_plugins::{AssetDynamicDetailsV0, ExternalPluginsMigration}; -use crate::migrations::spl2022::{ - AssetDynamicDetailsWithoutExtentions, DynamicDataToken2022MintExtentionsMigration, - TokenAccounts2022ExtentionsMigration, -}; +use crate::migrations::external_plugins::AssetDynamicDetailsV0; +use crate::migrations::spl2022::TokenAccounts2022ExtentionsMigration; use crate::parameters::ParameterColumn; use crate::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; use crate::token_prices::TokenPrice; @@ -72,13 +70,21 @@ pub mod raw_blocks_streaming_client; pub mod schedule; pub mod sequence_consistent; pub mod signature_client; -pub mod slots_dumper; pub mod storage_traits; pub mod token_accounts; pub mod token_prices; pub mod transaction; pub mod transaction_client; pub mod tree_seq; +// import the flatbuffers runtime library 
+extern crate flatbuffers; +#[allow( + clippy::missing_safety_doc, + unused_imports, + clippy::extra_unused_lifetimes +)] +pub mod asset_generated; +pub mod mappers; pub type Result = std::result::Result; @@ -89,27 +95,107 @@ const FULL_ITERATION_ACTION: &str = "full_iteration"; const BATCH_ITERATION_ACTION: &str = "batch_iteration"; const BATCH_GET_ACTION: &str = "batch_get"; const ITERATOR_TOP_ACTION: &str = "iterator_top"; +const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB +pub struct SlotStorage { + pub db: Arc, + pub raw_blocks_cbor: Column, + join_set: Arc>>>, + red_metrics: Arc, +} + +impl SlotStorage { + pub fn new( + db: Arc, + join_set: Arc>>>, + red_metrics: Arc, + ) -> Self { + let raw_blocks_cbor = Storage::column(db.clone(), red_metrics.clone()); + Self { + db, + raw_blocks_cbor, + red_metrics, + join_set, + } + } + + pub fn cf_names() -> Vec<&'static str> { + vec![RawBlock::NAME, MigrationVersions::NAME, OffChainData::NAME] + } + + pub fn open

( + db_path: P, + join_set: Arc>>>, + red_metrics: Arc, + ) -> Result + where + P: AsRef, + { + let cf_descriptors = Storage::cfs_to_column_families(Self::cf_names()); + let db = Arc::new(DB::open_cf_descriptors( + &Storage::get_db_options(), + db_path, + cf_descriptors, + )?); + Ok(Self::new(db, join_set, red_metrics)) + } + + pub fn open_secondary

( + primary_path: P, + secondary_path: P, + join_set: Arc>>>, + red_metrics: Arc, + ) -> Result + where + P: AsRef, + { + let cf_descriptors = Storage::cfs_to_column_families(Self::cf_names()); + let db = Arc::new(DB::open_cf_descriptors_as_secondary( + &Storage::get_db_options(), + primary_path, + secondary_path, + cf_descriptors, + )?); + Ok(Self::new(db, join_set, red_metrics)) + } + pub fn open_readonly

( + db_path: P, + join_set: Arc>>>, + red_metrics: Arc, + ) -> Result + where + P: AsRef, + { + let db = Arc::new(Storage::open_readonly_with_cfs_only_db( + db_path, + Self::cf_names(), + )?); + + Ok(Self::new(db, join_set, red_metrics)) + } +} pub struct Storage { + pub asset_data: Column, + pub mpl_core_collection_authorities: Column, + + // TODO: Deprecated, remove start pub asset_static_data: Column, pub asset_static_data_deprecated: Column, pub asset_dynamic_data: Column, pub asset_dynamic_data_deprecated: Column, - pub metadata_mint_map: Column, pub asset_authority_data: Column, pub asset_authority_deprecated: Column, pub asset_owner_data_deprecated: Column, pub asset_owner_data: Column, - pub asset_leaf_data: Column, pub asset_collection_data: Column, pub asset_collection_data_deprecated: Column, + // Deprecated, remove end + pub metadata_mint_map: Column, + pub asset_leaf_data: Column, pub asset_offchain_data: Column, pub cl_items: Column, pub cl_leafs: Column, - pub bubblegum_slots: Column, - pub ingestable_slots: Column, pub force_reingestable_slots: Column, - pub raw_blocks_cbor: Column, pub db: Arc, pub assets_update_idx: Column, pub fungible_assets_update_idx: Column, @@ -148,6 +234,8 @@ impl Storage { let asset_static_data = Self::column(db.clone(), red_metrics.clone()); let asset_dynamic_data = Self::column(db.clone(), red_metrics.clone()); let asset_dynamic_data_deprecated = Self::column(db.clone(), red_metrics.clone()); + let asset_data = Self::column(db.clone(), red_metrics.clone()); + let mpl_core_collection_authorities = Self::column(db.clone(), red_metrics.clone()); let metadata_mint_map = Self::column(db.clone(), red_metrics.clone()); let asset_authority_data = Self::column(db.clone(), red_metrics.clone()); let asset_authority_deprecated = Self::column(db.clone(), red_metrics.clone()); @@ -161,10 +249,7 @@ impl Storage { let cl_items = Self::column(db.clone(), red_metrics.clone()); let cl_leafs = Self::column(db.clone(), red_metrics.clone()); 
- let bubblegum_slots = Self::column(db.clone(), red_metrics.clone()); - let ingestable_slots = Self::column(db.clone(), red_metrics.clone()); let force_reingestable_slots = Self::column(db.clone(), red_metrics.clone()); - let raw_blocks = Self::column(db.clone(), red_metrics.clone()); let assets_update_idx = Self::column(db.clone(), red_metrics.clone()); let fungible_assets_update_idx = Self::column(db.clone(), red_metrics.clone()); let slot_asset_idx = Self::column(db.clone(), red_metrics.clone()); @@ -190,6 +275,9 @@ impl Storage { let spl_mints = Self::column(db.clone(), red_metrics.clone()); Self { + asset_data, + mpl_core_collection_authorities, + asset_static_data, asset_dynamic_data, asset_dynamic_data_deprecated, @@ -204,10 +292,7 @@ impl Storage { asset_offchain_data, cl_items, cl_leafs, - bubblegum_slots, - ingestable_slots, force_reingestable_slots, - raw_blocks_cbor: raw_blocks, db, assets_update_idx, fungible_assets_update_idx, @@ -239,12 +324,15 @@ impl Storage { } } - pub fn open( - db_path: &str, + pub fn open

( + db_path: P, join_set: Arc>>>, red_metrics: Arc, migration_state: MigrationState, - ) -> Result { + ) -> Result + where + P: AsRef, + { let cf_descriptors = Self::create_cf_descriptors(&migration_state); let db = Arc::new(DB::open_cf_descriptors( &Self::get_db_options(), @@ -254,13 +342,16 @@ impl Storage { Ok(Self::new(db, join_set, red_metrics)) } - pub fn open_secondary( - primary_path: &str, - secondary_path: &str, + pub fn open_secondary

( + primary_path: P, + secondary_path: P, join_set: Arc>>>, red_metrics: Arc, migration_state: MigrationState, - ) -> Result { + ) -> Result + where + P: AsRef, + { let cf_descriptors = Self::create_cf_descriptors(&migration_state); let db = Arc::new(DB::open_cf_descriptors_as_secondary( &Self::get_db_options(), @@ -271,35 +362,70 @@ impl Storage { Ok(Self::new(db, join_set, red_metrics)) } + pub fn open_cfs

( + db_path: P, + c_names: Vec<&str>, + join_set: Arc>>>, + red_metrics: Arc, + ) -> Result + where + P: AsRef, + { + let cf_descriptors = Self::cfs_to_column_families(c_names); + let db = Arc::new(DB::open_cf_descriptors( + &Self::get_db_options(), + db_path, + cf_descriptors, + )?); + Ok(Self::new(db, join_set, red_metrics)) + } + + pub fn open_readonly_with_cfs

( + db_path: P, + c_names: Vec<&str>, + join_set: Arc>>>, + red_metrics: Arc, + ) -> Result + where + P: AsRef, + { + let db = Arc::new(Self::open_readonly_with_cfs_only_db(db_path, c_names)?); + Ok(Self::new(db, join_set, red_metrics)) + } + + fn cfs_to_column_families(cfs: Vec<&str>) -> Vec { + cfs.iter() + .map(|name| ColumnFamilyDescriptor::new(*name, Self::get_default_cf_options())) + .collect() + } + + pub fn open_readonly_with_cfs_only_db

(db_path: P, c_names: Vec<&str>) -> Result + where + P: AsRef, + { + let cf_descriptors = Self::cfs_to_column_families(c_names); + DB::open_cf_descriptors_read_only(&Self::get_db_options(), db_path, cf_descriptors, false) + .map_err(StorageError::RocksDb) + } + fn create_cf_descriptors(migration_state: &MigrationState) -> Vec { vec![ Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), + Self::new_cf_descriptor::(migration_state), + Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::>(migration_state), - Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), - Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), @@ -369,9 +495,7 @@ impl Storage { options } - fn get_cf_options(migration_state: &MigrationState) -> Options { - const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB - + fn get_default_cf_options() -> Options { 
let mut cf_options = Options::default(); // 256 * 8 = 2GB. 6 of these columns should take at most 12GB of RAM cf_options.set_max_write_buffer_number(8); @@ -385,6 +509,11 @@ impl Storage { cf_options.set_level_zero_file_num_compaction_trigger(file_num_compaction_trigger as i32); cf_options.set_max_bytes_for_level_base(total_size_base); cf_options.set_target_file_size_base(file_size_base); + cf_options + } + + fn get_cf_options(migration_state: &MigrationState) -> Options { + let mut cf_options = Self::get_default_cf_options(); if matches!(migration_state, &MigrationState::CreateColumnFamilies) { cf_options.set_merge_operator_associative( @@ -395,6 +524,19 @@ impl Storage { } // Optional merges match C::NAME { + // todo: add migration version + asset::AssetCompleteDetails::NAME => { + cf_options.set_merge_operator_associative( + "merge_fn_merge_complete_details", + asset::merge_complete_details_fb_simplified, + ); + } + MplCoreCollectionAuthority::NAME => { + cf_options.set_merge_operator_associative( + "merge_fn_merge_mpl_core_collection_authority", + asset::MplCoreCollectionAuthority::merge, + ); + } AssetStaticDetails::NAME => { cf_options.set_merge_operator_associative( "merge_fn_merge_static_details", @@ -404,14 +546,9 @@ impl Storage { asset::AssetDynamicDetails::NAME => { let mf = match migration_state { MigrationState::Version(version) => match *version { - CollectionAuthorityMigration::VERSION - ..=ExternalPluginsMigration::VERSION => { + CollectionAuthorityMigration::VERSION => { AssetDynamicDetailsV0::merge_dynamic_details } - CleanCollectionAuthoritiesMigration::VERSION - ..=DynamicDataToken2022MintExtentionsMigration::VERSION => { - AssetDynamicDetailsWithoutExtentions::merge_dynamic_details - } _ => asset::AssetDynamicDetails::merge_dynamic_details, }, MigrationState::Last => asset::AssetDynamicDetails::merge_dynamic_details, @@ -504,30 +641,12 @@ impl Storage { asset::AssetStaticDetails::merge_keep_existing, ); } - 
bubblegum_slots::BubblegumSlots::NAME => { - cf_options.set_merge_operator_associative( - "merge_fn_bubblegum_slots_keep_existing", - asset::AssetStaticDetails::merge_keep_existing, - ); - } - bubblegum_slots::IngestableSlots::NAME => { - cf_options.set_merge_operator_associative( - "merge_fn_ingestable_slots_keep_existing", - asset::AssetStaticDetails::merge_keep_existing, - ); - } bubblegum_slots::ForceReingestableSlots::NAME => { cf_options.set_merge_operator_associative( "merge_fn_force_reingestable_slots_keep_existing", asset::AssetStaticDetails::merge_keep_existing, ); } - RawBlock::NAME => { - cf_options.set_merge_operator_associative( - "merge_fn_raw_block_keep_existing", - asset::AssetStaticDetails::merge_keep_existing, - ); - } AssetsUpdateIdx::NAME => { cf_options.set_merge_operator_associative( "merge_fn_assets_update_idx_keep_existing", @@ -662,4 +781,52 @@ impl Storage { } cf_options } + + #[cfg(feature = "integration_tests")] + pub async fn clean_db(&self) { + let column_families_to_remove = [ + MetadataMintMap::NAME, + asset::AssetLeaf::NAME, + OffChainData::NAME, + cl_items::ClItem::NAME, + cl_items::ClLeaf::NAME, + bubblegum_slots::ForceReingestableSlots::NAME, + AssetsUpdateIdx::NAME, + FungibleAssetsUpdateIdx::NAME, + SlotAssetIdx::NAME, + TreeSeqIdx::NAME, + TreesGaps::NAME, + TokenMetadataEdition::NAME, + TokenAccount::NAME, + TokenAccountOwnerIdx::NAME, + TokenAccountMintOwnerIdx::NAME, + AssetSignature::NAME, + BatchMintToVerify::NAME, + FailedBatchMint::NAME, + BatchMintWithStaker::NAME, + MigrationVersions::NAME, + TokenPrice::NAME, + AssetPreviews::NAME, + UrlToDownload::NAME, + ScheduledJob::NAME, + Inscription::NAME, + InscriptionData::NAME, + LeafSignature::NAME, + SplMint::NAME, + AssetCompleteDetails::NAME, + MplCoreCollectionAuthority::NAME, + ]; + + for cf in column_families_to_remove { + let cf_handler = self.db.cf_handle(cf).unwrap(); + for res in self.db.full_iterator_cf( + &cf_handler, + IteratorMode::Start, + ) { + if let 
Ok((key, _value)) = res { + self.db.delete_cf(&cf_handler, key).unwrap(); + } + } + } + } } diff --git a/rocks-db/src/mappers.rs b/rocks-db/src/mappers.rs new file mode 100644 index 000000000..7404a7cf1 --- /dev/null +++ b/rocks-db/src/mappers.rs @@ -0,0 +1,351 @@ +use std::cmp::Ordering; + +use solana_sdk::pubkey::Pubkey; + +use crate::asset_generated::asset as fb; +use entities::enums::*; +use entities::models::*; + +macro_rules! impl_from_enum { + ($src:ty, $dst:ty, $($variant:ident),*) => { + impl From<$src> for $dst { + fn from(value: $src) -> Self { + match value { + $( + <$src>::$variant => <$dst>::$variant, + )* + } + } + } + + impl From<$dst> for $src { + fn from(value: $dst) -> Self { + match value { + $( + <$dst>::$variant => <$src>::$variant, + )* + _ => <$src>::Unknown, + } + } + } + }; +} + +impl_from_enum!( + SpecificationAssetClass, + fb::SpecificationAssetClass, + Unknown, + FungibleToken, + FungibleAsset, + Nft, + PrintableNft, + ProgrammableNft, + Print, + TransferRestrictedNft, + NonTransferableNft, + IdentityNft, + MplCoreAsset, + MplCoreCollection +); + +impl_from_enum!( + RoyaltyTargetType, + fb::RoyaltyTargetType, + Unknown, + Creators, + Fanout, + Single +); +impl_from_enum!(OwnerType, fb::OwnerType, Unknown, Token, Single); + +impl From for fb::ChainMutability { + fn from(value: ChainMutability) -> Self { + match value { + ChainMutability::Mutable => fb::ChainMutability::Mutable, + ChainMutability::Immutable => fb::ChainMutability::Immutable, + } + } +} + +impl From for ChainMutability { + fn from(value: fb::ChainMutability) -> Self { + match value { + fb::ChainMutability::Mutable => ChainMutability::Mutable, + fb::ChainMutability::Immutable => ChainMutability::Immutable, + _ => ChainMutability::Immutable, + } + } +} + +impl<'a> fb::AssetCompleteDetails<'a> { + pub fn get_slot_updated(&'a self) -> u64 { + // Collect the slot_updated values from all available fields + let slots = [ + self.dynamic_details() + .and_then(|d| 
d.is_compressible()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.is_compressed()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.is_frozen()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.supply()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.seq()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.is_burnt()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.was_decompressed()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.onchain_data()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.creators()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.royalty_amount()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.url()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.chain_mutability()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.lamports()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.executable()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.metadata_owner()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.raw_name()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.mpl_core_plugins()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.mpl_core_unknown_plugins()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.rent_epoch()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.num_minted()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.current_size()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.plugins_json_version()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.mpl_core_external_plugins()) + .map(|c| 
c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.mpl_core_unknown_external_plugins()) + .map(|c| c.slot_updated()), + self.dynamic_details() + .and_then(|d| d.mint_extensions()) + .map(|c| c.slot_updated()), + self.authority().map(|a| a.slot_updated()), + self.owner() + .and_then(|o| o.owner()) + .map(|o| o.slot_updated()), + self.owner() + .and_then(|o| o.delegate()) + .map(|o| o.slot_updated()), + self.owner() + .and_then(|o| o.owner_type()) + .map(|o| o.slot_updated()), + self.owner() + .and_then(|o| o.owner_delegate_seq()) + .map(|o| o.slot_updated()), + self.collection() + .and_then(|c| c.collection()) + .map(|o| o.slot_updated()), + self.collection() + .and_then(|c| c.is_collection_verified()) + .map(|o| o.slot_updated()), + self.collection() + .and_then(|c| c.authority()) + .map(|o| o.slot_updated()), + ]; + // Filter out None values and find the maximum slot_updated + slots.iter().filter_map(|&slot| slot).max().unwrap_or(0) + } +} + +impl<'a> From> for Creator { + fn from(value: fb::Creator<'a>) -> Self { + Creator { + creator: Pubkey::try_from(value.creator().unwrap().bytes()).unwrap(), + creator_verified: value.creator_verified(), + creator_share: value.creator_share() as u8, + } + } +} + +impl<'a> From> for AssetIndex { + fn from(value: fb::AssetCompleteDetails<'a>) -> Self { + let pubkey = Pubkey::try_from(value.pubkey().unwrap().bytes()).unwrap(); + AssetIndex { + pubkey, + specification_version: SpecificationVersions::V1, + specification_asset_class: value + .static_details() + .map(|v| v.specification_asset_class().into()) + .unwrap_or_default(), + royalty_target_type: value + .static_details() + .map(|a| a.royalty_target_type().into()) + .unwrap_or_default(), + slot_created: value + .static_details() + .map(|a| a.created_at()) + .unwrap_or_default(), + owner_type: value + .owner() + .and_then(|o| o.owner_type()) + .map(|u| u.value().into()), + owner: value + .owner() + .and_then(|o| o.owner()) + .and_then(|u| u.value()) + .map(|k| 
Pubkey::try_from(k.bytes()).unwrap()), + delegate: value + .owner() + .and_then(|o| o.delegate()) + .and_then(|u| u.value()) + .map(|k| Pubkey::try_from(k.bytes()).unwrap()), + authority: value + .authority() + .and_then(|a| a.authority()) + .map(|k| Pubkey::try_from(k.bytes()).unwrap()), + collection: value + .collection() + .and_then(|c| c.collection()) + .and_then(|u| u.value()) + .map(|k| Pubkey::try_from(k.bytes()).unwrap()), + is_collection_verified: value + .collection() + .and_then(|c| c.is_collection_verified()) + .map(|u| u.value()), + creators: value + .dynamic_details() + .and_then(|d| d.creators()) + .and_then(|u| u.value()) + .map(|v| v.iter().map(Creator::from).collect()) + .unwrap_or_default(), + royalty_amount: value + .dynamic_details() + .and_then(|u| u.royalty_amount()) + .map(|d| d.value() as i64) + .unwrap_or_default(), + is_burnt: value + .dynamic_details() + .and_then(|d| d.is_burnt()) + .map(|u| u.value()) + .unwrap_or_default(), + is_compressible: value + .dynamic_details() + .and_then(|d| d.is_compressible()) + .map(|u| u.value()) + .unwrap_or_default(), + is_compressed: value + .dynamic_details() + .and_then(|d| d.is_compressed()) + .map(|u| u.value()) + .unwrap_or_default(), + is_frozen: value + .dynamic_details() + .and_then(|d| d.is_frozen()) + .map(|u| u.value()) + .unwrap_or_default(), + supply: value + .dynamic_details() + .and_then(|d| d.supply()) + .map(|u| u.value() as i64), + update_authority: None, // requires mpl core collections + metadata_url: value + .dynamic_details() + .and_then(|d| d.url()) + .and_then(|u| u.value()) + .filter(|s| !s.is_empty()) + .map(|s| UrlWithStatus::new(s, false)), + slot_updated: value.get_slot_updated() as i64, + fungible_asset_mint: None, + fungible_asset_balance: None, + } + } +} + +impl PartialOrd for fb::UpdateVersion<'_> { + fn partial_cmp(&self, other: &Self) -> Option { + match (self.version_type(), other.version_type()) { + (fb::UpdateVersionType::Sequence, 
fb::UpdateVersionType::Sequence) + | (fb::UpdateVersionType::WriteVersion, fb::UpdateVersionType::WriteVersion) => { + self.version_value().partial_cmp(&other.version_value()) + } + // this is asset decompress case. Update with write version field is always most recent + (fb::UpdateVersionType::Sequence, fb::UpdateVersionType::WriteVersion) => { + Some(Ordering::Less) + } + (fb::UpdateVersionType::WriteVersion, fb::UpdateVersionType::Sequence) => None, + _ => None, + } + } +} +macro_rules! impl_partial_ord_for_updated { + ($name:ident) => { + impl PartialOrd for fb::$name<'_> { + fn partial_cmp(&self, other: &Self) -> Option { + Some( + match self.update_version().partial_cmp(&other.update_version()) { + Some(std::cmp::Ordering::Equal) => { + self.slot_updated().cmp(&other.slot_updated()) + } + Some(ord) => ord, + None => self.slot_updated().cmp(&other.slot_updated()), + }, + ) + } + } + }; +} + +impl_partial_ord_for_updated!(UpdatedBool); +impl_partial_ord_for_updated!(UpdatedU64); +impl_partial_ord_for_updated!(UpdatedU32); +impl_partial_ord_for_updated!(UpdatedString); +impl_partial_ord_for_updated!(UpdatedPubkey); +impl_partial_ord_for_updated!(UpdatedOptionalPubkey); +impl_partial_ord_for_updated!(UpdatedCreators); +impl_partial_ord_for_updated!(UpdatedChainMutability); +impl_partial_ord_for_updated!(UpdatedOwnerType); + +impl fb::AssetAuthority<'_> { + pub fn compare(&self, other: &Self) -> Ordering { + if let (Some(self_write_version), Some(other_write_version)) = unsafe { + ( + self._tab + .get::(fb::AssetAuthority::VT_WRITE_VERSION, None), + other + ._tab + .get::(fb::AssetAuthority::VT_WRITE_VERSION, None), + ) + } { + self_write_version.cmp(&other_write_version) + } else { + self.slot_updated().cmp(&other.slot_updated()) + } + } +} diff --git a/rocks-db/src/migrations/external_plugins.rs b/rocks-db/src/migrations/external_plugins.rs index b7a8babac..2460b4f48 100644 --- a/rocks-db/src/migrations/external_plugins.rs +++ 
b/rocks-db/src/migrations/external_plugins.rs @@ -1,5 +1,4 @@ use crate::asset::{update_field, update_optional_field}; -use crate::migrations::spl2022::AssetDynamicDetailsWithoutExtentions; use crate::migrator::{RocksMigration, SerializationType}; use bincode::{deserialize, serialize}; use entities::enums::ChainMutability; @@ -18,7 +17,7 @@ pub struct AssetDynamicDetailsV0 { pub supply: Option>, pub seq: Option>, pub is_burnt: Updated, - pub was_decompressed: Updated, + pub was_decompressed: Option>, pub onchain_data: Option>, pub creators: Updated>, pub royalty_amount: Updated, @@ -36,38 +35,6 @@ pub struct AssetDynamicDetailsV0 { pub plugins_json_version: Option>, } -impl From for AssetDynamicDetailsWithoutExtentions { - fn from(value: AssetDynamicDetailsV0) -> Self { - Self { - pubkey: value.pubkey, - is_compressible: value.is_compressible, - is_compressed: value.is_compressed, - is_frozen: value.is_frozen, - supply: value.supply, - seq: value.seq, - is_burnt: value.is_burnt, - was_decompressed: value.was_decompressed, - onchain_data: value.onchain_data, - creators: value.creators, - royalty_amount: value.royalty_amount, - url: value.url, - chain_mutability: value.chain_mutability, - lamports: value.lamports, - executable: value.executable, - metadata_owner: value.metadata_owner, - raw_name: value.raw_name, - mpl_core_plugins: value.plugins, - mpl_core_unknown_plugins: value.unknown_plugins, - rent_epoch: value.rent_epoch, - num_minted: value.num_minted, - current_size: value.current_size, - plugins_json_version: value.plugins_json_version, - mpl_core_external_plugins: None, - mpl_core_unknown_external_plugins: None, - } - } -} - impl AssetDynamicDetailsV0 { pub fn merge_dynamic_details( _new_key: &[u8], @@ -101,7 +68,7 @@ impl AssetDynamicDetailsV0 { update_field(&mut current_val.is_burnt, &new_val.is_burnt); update_field(&mut current_val.creators, &new_val.creators); update_field(&mut current_val.royalty_amount, &new_val.royalty_amount); - update_field(&mut 
current_val.was_decompressed, &new_val.was_decompressed); + update_optional_field(&mut current_val.was_decompressed, &new_val.was_decompressed); update_optional_field(&mut current_val.onchain_data, &new_val.onchain_data); update_field(&mut current_val.url, &new_val.url); update_optional_field( @@ -142,11 +109,3 @@ impl AssetDynamicDetailsV0 { result.and_then(|result| serialize(&result).ok()) } } - -pub(crate) struct ExternalPluginsMigration; -impl RocksMigration for ExternalPluginsMigration { - const VERSION: u64 = 1; - const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; - type NewDataType = AssetDynamicDetailsWithoutExtentions; - type OldDataType = AssetDynamicDetailsV0; -} diff --git a/rocks-db/src/migrations/spl2022.rs b/rocks-db/src/migrations/spl2022.rs index e99dadcaf..e73a0e9a6 100644 --- a/rocks-db/src/migrations/spl2022.rs +++ b/rocks-db/src/migrations/spl2022.rs @@ -43,171 +43,6 @@ impl From for TokenAccount { } } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AssetDynamicDetailsWithoutExtentions { - pub pubkey: Pubkey, - pub is_compressible: Updated, - pub is_compressed: Updated, - pub is_frozen: Updated, - pub supply: Option>, - pub seq: Option>, - pub is_burnt: Updated, - pub was_decompressed: Updated, - pub onchain_data: Option>, - pub creators: Updated>, - pub royalty_amount: Updated, - pub url: Updated, - pub chain_mutability: Option>, - pub lamports: Option>, - pub executable: Option>, - pub metadata_owner: Option>, - pub raw_name: Option>, - pub mpl_core_plugins: Option>, - pub mpl_core_unknown_plugins: Option>, - pub rent_epoch: Option>, - pub num_minted: Option>, - pub current_size: Option>, - pub plugins_json_version: Option>, - pub mpl_core_external_plugins: Option>, - pub mpl_core_unknown_external_plugins: Option>, -} - -impl TypedColumn for AssetDynamicDetailsWithoutExtentions { - type KeyType = Pubkey; - type ValueType = Self; - const NAME: &'static str = "ASSET_DYNAMIC_V2"; - - fn encode_key(pubkey: 
Pubkey) -> Vec { - encode_pubkey(pubkey) - } - - fn decode_key(bytes: Vec) -> crate::Result { - decode_pubkey(bytes) - } -} - -impl AssetDynamicDetailsWithoutExtentions { - pub fn merge_dynamic_details( - _new_key: &[u8], - existing_val: Option<&[u8]>, - operands: &MergeOperands, - ) -> Option> { - let mut result: Option = None; - if let Some(existing_val) = existing_val { - match deserialize::(existing_val) { - Ok(value) => { - result = Some(value); - } - Err(e) => { - error!( - "RocksDB: AssetDynamicDetailsWithoutExtentions deserialize existing_val: {}", - e - ) - } - } - } - - for op in operands { - match deserialize::(op) { - Ok(new_val) => { - result = Some(if let Some(mut current_val) = result { - update_field(&mut current_val.is_compressible, &new_val.is_compressible); - update_field(&mut current_val.is_compressed, &new_val.is_compressed); - update_field(&mut current_val.is_frozen, &new_val.is_frozen); - update_optional_field(&mut current_val.supply, &new_val.supply); - update_optional_field(&mut current_val.seq, &new_val.seq); - update_field(&mut current_val.is_burnt, &new_val.is_burnt); - update_field(&mut current_val.creators, &new_val.creators); - update_field(&mut current_val.royalty_amount, &new_val.royalty_amount); - update_field(&mut current_val.was_decompressed, &new_val.was_decompressed); - update_optional_field(&mut current_val.onchain_data, &new_val.onchain_data); - update_field(&mut current_val.url, &new_val.url); - update_optional_field( - &mut current_val.chain_mutability, - &new_val.chain_mutability, - ); - update_optional_field(&mut current_val.lamports, &new_val.lamports); - update_optional_field(&mut current_val.executable, &new_val.executable); - update_optional_field( - &mut current_val.metadata_owner, - &new_val.metadata_owner, - ); - update_optional_field(&mut current_val.raw_name, &new_val.raw_name); - update_optional_field( - &mut current_val.mpl_core_plugins, - &new_val.mpl_core_plugins, - ); - update_optional_field( - &mut 
current_val.mpl_core_unknown_plugins, - &new_val.mpl_core_unknown_plugins, - ); - update_optional_field(&mut current_val.num_minted, &new_val.num_minted); - update_optional_field(&mut current_val.current_size, &new_val.current_size); - update_optional_field(&mut current_val.rent_epoch, &new_val.rent_epoch); - update_optional_field( - &mut current_val.plugins_json_version, - &new_val.plugins_json_version, - ); - update_optional_field( - &mut current_val.mpl_core_external_plugins, - &new_val.mpl_core_external_plugins, - ); - update_optional_field( - &mut current_val.mpl_core_unknown_external_plugins, - &new_val.mpl_core_unknown_external_plugins, - ); - - current_val - } else { - new_val - }); - } - Err(e) => { - error!( - "RocksDB: AssetDynamicDetailsWithoutExtentions deserialize new_val: {}", - e - ) - } - } - } - - result.and_then(|result| serialize(&result).ok()) - } -} - -impl From for AssetDynamicDetails { - fn from(value: AssetDynamicDetailsWithoutExtentions) -> Self { - Self { - pubkey: value.pubkey, - is_compressible: value.is_compressible, - is_compressed: value.is_compressed, - is_frozen: value.is_frozen, - supply: value.supply, - seq: value.seq, - is_burnt: value.is_burnt, - was_decompressed: value.was_decompressed, - onchain_data: value.onchain_data, - creators: value.creators, - royalty_amount: value.royalty_amount, - url: value.url, - chain_mutability: value.chain_mutability, - lamports: value.lamports, - executable: value.executable, - metadata_owner: value.metadata_owner, - raw_name: value.raw_name, - mpl_core_plugins: value.mpl_core_plugins, - mpl_core_unknown_plugins: value.mpl_core_unknown_plugins, - rent_epoch: value.rent_epoch, - num_minted: value.num_minted, - current_size: value.current_size, - plugins_json_version: value.plugins_json_version, - mpl_core_external_plugins: value.mpl_core_external_plugins, - mpl_core_unknown_external_plugins: value.mpl_core_unknown_external_plugins, - mint_extensions: None, - } - } -} - pub(crate) struct 
TokenAccounts2022ExtentionsMigration; impl RocksMigration for TokenAccounts2022ExtentionsMigration { const VERSION: u64 = 3; @@ -215,11 +50,3 @@ impl RocksMigration for TokenAccounts2022ExtentionsMigration { type NewDataType = TokenAccount; type OldDataType = TokenAccountWithoutExtentions; } - -pub(crate) struct DynamicDataToken2022MintExtentionsMigration; -impl RocksMigration for DynamicDataToken2022MintExtentionsMigration { - const VERSION: u64 = 4; - const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; - type NewDataType = AssetDynamicDetails; - type OldDataType = AssetDynamicDetailsWithoutExtentions; -} diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index a2309ff1f..b6cff3815 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -1,8 +1,9 @@ +use crate::asset::{AssetCollection, AssetCompleteDetails}; use crate::column::{Column, TypedColumn}; use crate::errors::StorageError; use crate::key_encoders::{decode_u64, encode_u64}; -use crate::Result; use crate::Storage; +use crate::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Result}; use bincode::deserialize; use interface::migration_version_manager::PrimaryStorageMigrationVersionManager; use metrics_utils::red::RequestErrorDurationMetrics; @@ -40,38 +41,100 @@ pub trait RocksMigration { + Into<::ValueType>; } +#[macro_export] +macro_rules! 
convert_and_merge { + ($column:expr, $builder:expr, $handle: expr, $db:expr) => {{ + let iter = $column.pairs_iterator($column.iter_start()); + let mut batch = rocksdb::WriteBatchWithTransaction::::default(); + for (k, v) in iter { + let asset_data = v.convert_to_fb(&mut $builder); + $builder.finish_minimal(asset_data); + batch.put_cf( + $handle, + Column::::encode_key(k), + $builder.finished_data(), + ); + $builder.reset(); + } + $db.write(batch)?; + }}; +} + impl Storage { pub async fn apply_all_migrations( db_path: &str, migration_storage_path: &str, migration_version_manager: Arc, ) -> Result<()> { - let applied_migrations = migration_version_manager - .get_all_applied_migrations() - .map_err(StorageError::Common)?; - let migration_applier = - MigrationApplier::new(db_path, migration_storage_path, applied_migrations); - - // apply all migrations - migration_applier - .apply_migration(crate::migrations::collection_authority::CollectionAuthorityMigration) - .await?; - migration_applier - .apply_migration(crate::migrations::external_plugins::ExternalPluginsMigration) - .await?; - migration_applier - .apply_migration( - crate::migrations::clean_update_authorities::CleanCollectionAuthoritiesMigration, - ) - .await?; - migration_applier - .apply_migration(crate::migrations::spl2022::TokenAccounts2022ExtentionsMigration) - .await?; - migration_applier - .apply_migration( - crate::migrations::spl2022::DynamicDataToken2022MintExtentionsMigration, - ) - .await?; + // TODO: how do I fix this for a brand new DB? 
+ // let applied_migrations = migration_version_manager + // .get_all_applied_migrations() + // .map_err(StorageError::Common)?; + // let migration_applier = + // MigrationApplier::new(db_path, migration_storage_path, applied_migrations); + + // // apply all migrations + // migration_applier + // .apply_migration(crate::migrations::collection_authority::CollectionAuthorityMigration) + // .await?; + // migration_applier + // .apply_migration(crate::migrations::external_plugins::ExternalPluginsMigration) + // .await?; + // migration_applier + // .apply_migration( + // crate::migrations::clean_update_authorities::CleanCollectionAuthoritiesMigration, + // ) + // .await?; + // migration_applier + // .apply_migration(crate::migrations::spl2022::TokenAccounts2022ExtentionsMigration) + // .await?; + // migration_applier + // .apply_migration( + // crate::migrations::spl2022::DynamicDataToken2022MintExtentionsMigration, + // ) + // .await?; + Ok(()) + } + + pub async fn apply_migration_merge(&self) -> Result<()> { + let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(2500); + convert_and_merge!( + self.asset_static_data, + builder, + &self.asset_data.handle(), + self.db + ); + convert_and_merge!( + self.asset_dynamic_data, + builder, + &self.asset_data.handle(), + self.db + ); + convert_and_merge!( + self.asset_authority_data, + builder, + &self.asset_data.handle(), + self.db + ); + convert_and_merge!( + self.asset_owner_data, + builder, + &self.asset_data.handle(), + self.db + ); + convert_and_merge!( + self.asset_collection_data, + builder, + &self.asset_data.handle(), + self.db + ); + + self.db.drop_cf(AssetStaticDetails::NAME)?; + self.db.drop_cf(AssetDynamicDetails::NAME)?; + self.db.drop_cf(AssetAuthority::NAME)?; + self.db.drop_cf(AssetOwner::NAME)?; + self.db.drop_cf(AssetCollection::NAME)?; + Ok(()) } } diff --git a/rocks-db/src/raw_block.rs b/rocks-db/src/raw_block.rs index 00ebc8a31..f65b027d2 100644 --- a/rocks-db/src/raw_block.rs +++ 
b/rocks-db/src/raw_block.rs @@ -1,14 +1,11 @@ use std::sync::Arc; -use crate::{column::TypedColumn, key_encoders, Storage}; +use crate::SlotStorage; +use crate::{column::TypedColumn, key_encoders}; use async_trait::async_trait; use entities::models::RawBlock; use interface::error::StorageError as InterfaceStorageError; -use interface::{ - error::BlockConsumeError, - signature_persistence::{BlockConsumer, BlockProducer}, -}; -use tracing::error; +use interface::signature_persistence::BlockProducer; impl TypedColumn for RawBlock { type KeyType = u64; @@ -26,39 +23,7 @@ impl TypedColumn for RawBlock { } #[async_trait] -impl BlockConsumer for Storage { - async fn consume_block( - &self, - slot: u64, - block: solana_transaction_status::UiConfirmedBlock, - ) -> Result<(), BlockConsumeError> { - let raw_block = RawBlock { slot, block }; - self.raw_blocks_cbor - .put_cbor_encoded(raw_block.slot, raw_block.clone()) - .await - .map_err(|e| { - error!( - "Failed to put raw block for slot: {}, error: {}", - raw_block.slot, e - ); - BlockConsumeError::PersistenceErr(e.into()) - }) - } - - async fn already_processed_slot(&self, slot: u64) -> Result { - self.raw_blocks_cbor - .get_cbor_encoded(slot) - .await - .map(|r| r.is_some()) - .map_err(|e| { - tracing::error!("Failed to get raw block for slot: {}, error: {}", slot, e); - BlockConsumeError::PersistenceErr(e.into()) - }) - } -} - -#[async_trait] -impl BlockProducer for Storage { +impl BlockProducer for SlotStorage { async fn get_block( &self, slot: u64, @@ -71,12 +36,9 @@ impl BlockProducer for Storage { .map_err(|e| InterfaceStorageError::Common(e.to_string()))?; if raw_block.is_none() { if let Some(backup_provider) = backup_provider { - let none_bp: Option> = None; + let none_bp: Option> = None; let block = backup_provider.get_block(slot, none_bp).await?; tracing::info!("Got block from backup provider for slot: {}", slot); - self.consume_block(slot, block.clone()) - .await - .map_err(|e| 
InterfaceStorageError::NotFound(e.to_string()))?; return Ok(block); } } diff --git a/rocks-db/src/raw_blocks_streaming_client.rs b/rocks-db/src/raw_blocks_streaming_client.rs index 5d6683506..7b09c8fc4 100644 --- a/rocks-db/src/raw_blocks_streaming_client.rs +++ b/rocks-db/src/raw_blocks_streaming_client.rs @@ -1,6 +1,6 @@ -use crate::column::TypedColumn; use crate::errors::StorageError; use crate::Storage; +use crate::{column::TypedColumn, SlotStorage}; use async_trait::async_trait; use entities::models::RawBlock; use interface::asset_streaming_and_discovery::{ @@ -12,14 +12,14 @@ use std::sync::Arc; use tokio_stream::wrappers::ReceiverStream; #[async_trait] -impl RawBlocksStreamer for Storage { +impl RawBlocksStreamer for SlotStorage { async fn get_raw_blocks_stream_in_range( &self, start_slot: u64, end_slot: u64, ) -> Result { let (tx, rx) = tokio::sync::mpsc::channel(32); - let backend = self.raw_blocks_cbor.backend.clone(); + let backend = self.db.clone(); let metrics = self.red_metrics.clone(); self.join_set.lock().await.spawn(tokio::spawn(async move { let _ = process_raw_blocks_range( diff --git a/rocks-db/src/slots_dumper.rs b/rocks-db/src/slots_dumper.rs deleted file mode 100644 index f850a6d0b..000000000 --- a/rocks-db/src/slots_dumper.rs +++ /dev/null @@ -1,44 +0,0 @@ -use crate::bubblegum_slots::{form_bubblegum_slots_key, BubblegumSlots}; -use crate::Storage; -use async_trait::async_trait; -use interface::slots_dumper::SlotsDumper; -use std::collections::HashMap; -use std::time::Duration; -use tracing::error; - -pub const PUT_SLOT_RETRIES: u32 = 5; -pub const SECONDS_TO_RETRY_ROCKSDB_OPERATION: u64 = 5; - -#[async_trait] -impl SlotsDumper for Storage { - async fn dump_slots(&self, slots: &[u64]) { - tracing::info!("Saving {} slots", slots.len()); - let slots_map: HashMap = slots.iter().fold( - HashMap::new(), - |mut acc: HashMap, slot| { - acc.insert(form_bubblegum_slots_key(*slot), BubblegumSlots {}); - acc - }, - ); - - if !slots_map.is_empty() { - 
let mut counter = PUT_SLOT_RETRIES; - while counter > 0 { - let put_result = self.bubblegum_slots.put_batch(slots_map.clone()).await; - - match put_result { - Ok(_) => { - break; - } - Err(err) => { - error!("Error putting slots: {}", err); - counter -= 1; - tokio::time::sleep(Duration::from_secs(SECONDS_TO_RETRY_ROCKSDB_OPERATION)) - .await; - continue; - } - } - } - } - } -} diff --git a/rocks-db/src/storage_traits.rs b/rocks-db/src/storage_traits.rs index dadb5b212..223be2c83 100644 --- a/rocks-db/src/storage_traits.rs +++ b/rocks-db/src/storage_traits.rs @@ -1,4 +1,4 @@ -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use async_trait::async_trait; use mockall::automock; @@ -50,13 +50,12 @@ pub trait AssetIndexReader { async fn get_fungible_assets_indexes( &self, keys: &[Pubkey], - ) -> Result>; + ) -> Result>; async fn get_nft_asset_indexes<'a>( &self, keys: &[Pubkey], - collection_authorities: Option<&'a HashMap>, - ) -> Result>; + ) -> Result>; } #[automock] @@ -137,7 +136,7 @@ impl AssetIndexReader for MockAssetIndexStorage { async fn get_fungible_assets_indexes( &self, keys: &[Pubkey], - ) -> Result> { + ) -> Result> { self.mock_asset_index_reader .get_fungible_assets_indexes(keys) .await @@ -146,10 +145,9 @@ impl AssetIndexReader for MockAssetIndexStorage { async fn get_nft_asset_indexes<'a>( &self, keys: &[Pubkey], - collection_authorities: Option<&'a HashMap>, - ) -> Result> { + ) -> Result> { self.mock_asset_index_reader - .get_nft_asset_indexes(keys, collection_authorities) + .get_nft_asset_indexes(keys) .await } } diff --git a/rocks-db/src/token_accounts.rs b/rocks-db/src/token_accounts.rs index 8bb86ef61..88855a3d9 100644 --- a/rocks-db/src/token_accounts.rs +++ b/rocks-db/src/token_accounts.rs @@ -115,7 +115,7 @@ macro_rules! 
impl_merge_values { for op in operands { match deserialize::(op) { Ok(new_val) => { - if new_val.write_version > write_version { + if new_val.write_version > write_version || result.is_empty() { write_version = new_val.write_version; result = op.to_vec(); } @@ -154,7 +154,7 @@ pub fn merge_token_accounts( for op in operands { match deserialize::(op) { Ok(new_val) => { - if new_val.write_version > write_version { + if new_val.write_version > write_version || result.is_empty() { write_version = new_val.write_version; result = op.to_vec(); } @@ -352,7 +352,7 @@ pub fn merge_mints( for op in operands { match deserialize::(op) { Ok(new_val) => { - if new_val.write_version > write_version { + if new_val.write_version > write_version || result.is_empty() { write_version = new_val.write_version; result = op.to_vec(); } diff --git a/rocks-db/src/transaction_client.rs b/rocks-db/src/transaction_client.rs index 70fbafeab..ab1b3f223 100644 --- a/rocks-db/src/transaction_client.rs +++ b/rocks-db/src/transaction_client.rs @@ -3,6 +3,8 @@ use entities::models::SignatureWithSlot; use interface::error::StorageError; use solana_sdk::pubkey::Pubkey; +use crate::asset::AssetCompleteDetails; +use crate::column::TypedColumn; use crate::parameters::Parameter; use crate::{ parameters, @@ -76,51 +78,36 @@ impl Storage { ix: &InstructionResult, ) -> Result<(), StorageError> { if let Some(ref update) = ix.update { - if let Some(ref dyn_data) = update.update { - if let Err(e) = self.save_tx_data_and_asset_updated_with_batch( - batch, - dyn_data.pk, - dyn_data.slot, - &dyn_data.leaf, - &dyn_data.dynamic_data, - ) { - tracing::error!("Failed to save tx data and asset updated: {}", e); - } - } - if let Some(ref static_update) = update.static_update { - if let Err(e) = self.asset_static_data.merge_with_batch( - batch, - static_update.pk, - &static_update.details, - ) { - tracing::error!("Failed to merge asset static data: {}", e); - } - } - if let Some(ref owner_update) = update.owner_update { - 
if let Err(e) = self.asset_owner_data.merge_with_batch( - batch, - owner_update.pk, - &owner_update.details, - ) { - tracing::error!("Failed to merge asset owner data: {}", e); - } - } - if let Some(ref authority_update) = update.authority_update { - if let Err(e) = self.asset_authority_data.merge_with_batch( - batch, - authority_update.pk, - &authority_update.details, - ) { - tracing::error!("Failed to merge asset authority data: {}", e); - } - } - if let Some(ref collection_update) = update.collection_update { - if let Err(e) = self.asset_collection_data.merge_with_batch( - batch, - collection_update.pk, - &collection_update.details, - ) { - tracing::error!("Failed to merge asset collection data: {}", e); + let pk = update + .static_update + .as_ref() + .map(|s| s.pk) + .or(update.update.as_ref().map(|u| u.pk)) + .or(update.owner_update.as_ref().map(|o| o.pk)) + .or(update.authority_update.as_ref().map(|a| a.pk)) + .or(update.collection_update.as_ref().map(|c| c.pk)); + if let Some(pk) = pk { + let acd = AssetCompleteDetails { + pubkey: pk, + static_details: update.static_update.as_ref().map(|s| s.details.clone()), + dynamic_details: update.update.as_ref().and_then(|u| u.dynamic_data.clone()), + owner: update.owner_update.as_ref().map(|o| o.details.clone()), + authority: update.authority_update.as_ref().map(|a| a.details.clone()), + collection: update.collection_update.as_ref().map(|c| c.details.clone()), + }; + let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(2500); + let acd = acd.convert_to_fb(&mut builder); + builder.finish_minimal(acd); + batch.merge_cf( + &self.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + pk, + builder.finished_data(), + ); + if let Some(leaf) = update.update.as_ref().and_then(|u| u.leaf.as_ref()) { + self.asset_leaf_data.merge_with_batch(batch, pk, leaf)? 
+ }; + if let Some(slot) = update.update.as_ref().map(|u| u.slot) { + self.asset_updated_with_batch(batch, slot, pk)?; } } @@ -148,13 +135,14 @@ impl Storage { if let Some(ref decompressed) = ix.decompressed { self.asset_leaf_data .delete_with_batch(batch, decompressed.pk); - if let Err(e) = self.asset_dynamic_data.merge_with_batch( - batch, + let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(2500); + let acd = decompressed.details.convert_to_fb(&mut builder); + builder.finish_minimal(acd); + batch.merge_cf( + &self.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), decompressed.pk, - &decompressed.details, - ) { - tracing::error!("Failed to save tx data and asset updated: {}", e); - } + builder.finished_data(), + ); } if let Some(ref tree_update) = ix.tree_update { self.save_changelog_with_batch(batch, &tree_update.event, tree_update.slot); diff --git a/rocks-db/tests/asset_streaming_client_tests.rs b/rocks-db/tests/asset_streaming_client_tests.rs index b29df646c..d5ef20f1f 100644 --- a/rocks-db/tests/asset_streaming_client_tests.rs +++ b/rocks-db/tests/asset_streaming_client_tests.rs @@ -82,9 +82,9 @@ mod tests { async fn test_get_asset_details_stream_in_range_data() { let cnt = 1000; let env = RocksTestEnvironment::new(&[]); - let storage = &env.storage; let slot = 100; let pks = env.generate_assets(cnt, slot).await; + let storage = &env.storage; // Call get_asset_details_stream_in_range on a database let response = storage.get_asset_details_stream_in_range(100, 200).await; diff --git a/rocks-db/tests/batch_client_integration_tests.rs b/rocks-db/tests/batch_client_integration_tests.rs index d8fdf4203..724d375e1 100644 --- a/rocks-db/tests/batch_client_integration_tests.rs +++ b/rocks-db/tests/batch_client_integration_tests.rs @@ -355,7 +355,10 @@ mod tests { async fn test_multiple_slot_updates() { let storage = RocksTestEnvironment::new(&[]).storage; let pk = Pubkey::new_unique(); - let dynamic_data = create_test_dynamic_data(pk, 0, 
"http://example.com".to_string()); + let mut dynamic_data = create_test_dynamic_data(pk, 0, "http://example.com".to_string()); + dynamic_data.is_compressible = Updated::new(0, None, false); + dynamic_data.is_compressed = Updated::new(0, None, false); + dynamic_data.supply = Some(Updated::new(0, None, 1)); storage .asset_dynamic_data diff --git a/tests/setup/Cargo.toml b/tests/setup/Cargo.toml index 1fe6a0e3e..8351a0e32 100644 --- a/tests/setup/Cargo.toml +++ b/tests/setup/Cargo.toml @@ -19,4 +19,6 @@ sqlx = { workspace = true } testcontainers = { workspace = true } testcontainers-modules = { workspace = true } uuid = { workspace = true } -tracing = { workspace = true } \ No newline at end of file +tracing = { workspace = true } +rocksdb = { workspace = true } +flatbuffers = { version="24.3.25", features = ["serialize"]} diff --git a/tests/setup/src/rocks.rs b/tests/setup/src/rocks.rs index fe39be25a..fd3ab6cf8 100644 --- a/tests/setup/src/rocks.rs +++ b/tests/setup/src/rocks.rs @@ -2,6 +2,8 @@ use std::sync::Arc; use entities::models::{OffChainData, Updated}; use rand::{random, Rng}; +use rocks_db::asset::AssetCompleteDetails; +use rocks_db::column::TypedColumn; use solana_sdk::pubkey::Pubkey; use tempfile::TempDir; @@ -141,50 +143,42 @@ impl RocksTestEnvironment { }, )?; - let static_data_batch = self.storage.asset_static_data.put_batch( - generated_assets - .static_details - .iter() - .map(|value| (value.pubkey, value.clone())) - .collect(), - ); - let authority_batch = self.storage.asset_authority_data.put_batch( - generated_assets - .authorities - .iter() - .map(|value| (value.pubkey, value.clone())) - .collect(), - ); - let owners_batch = self.storage.asset_owner_data.put_batch( - generated_assets - .owners - .iter() - .map(|value| (value.pubkey, value.clone())) - .collect(), - ); - let dynamic_details_batch = self.storage.asset_dynamic_data.put_batch( - generated_assets - .dynamic_details - .iter() - .map(|value| (value.pubkey, value.clone())) - .collect(), - 
); - let collections_batch = self.storage.asset_collection_data.put_batch( - generated_assets - .collections - .iter() - .map(|value| (value.pubkey, value.clone())) - .collect(), - ); - - tokio::try_join!( - static_data_batch, - authority_batch, - owners_batch, - dynamic_details_batch, - collections_batch - )?; - + let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(2500); + let mut batch = rocksdb::WriteBatchWithTransaction::::default(); + generated_assets + .pubkeys + .iter() + .zip(generated_assets.static_details.iter()) + .zip(generated_assets.dynamic_details.iter()) + .zip(generated_assets.collections.iter()) + .zip(generated_assets.owners.iter()) + .enumerate() + .for_each( + |(index, ((((pubkey, static_details), dynamic_details), collection), owner))| { + let authority = generated_assets.authorities.get(index); + let complete_asset = AssetCompleteDetails { + pubkey: *pubkey, + static_details: Some(static_details.clone()), + dynamic_details: Some(dynamic_details.clone()), + authority: authority.cloned(), + collection: Some(collection.clone()), + owner: Some(owner.clone()), + }; + let asset_data = complete_asset.convert_to_fb(&mut builder); + builder.finish_minimal(asset_data); + batch.put_cf( + &self + .storage + .db + .cf_handle(AssetCompleteDetails::NAME) + .unwrap(), + *pubkey, + builder.finished_data(), + ); + builder.reset(); + }, + ); + self.storage.db.write(batch)?; Ok(()) } } @@ -242,6 +236,7 @@ impl RocksTestEnvironmentSetup { rand::thread_rng().gen_range(0..100), )), delegate: generate_test_updated(Some(Pubkey::new_unique())), + is_current_owner: generate_test_updated(true), }) .collect() } diff --git a/usecase/src/bigtable.rs b/usecase/src/bigtable.rs index 5fd9b822c..b68d98fd1 100644 --- a/usecase/src/bigtable.rs +++ b/usecase/src/bigtable.rs @@ -10,10 +10,10 @@ use solana_transaction_status::{ }; use std::sync::Arc; use std::time::Duration; -use tracing::error; +use tracing::{error, warn}; pub const GET_DATA_FROM_BG_RETRIES: u32 = 5; 
-pub const SECONDS_TO_RETRY_GET_DATA_FROM_BG: u64 = 5; +pub const SECONDS_TO_RETRY_GET_DATA_FROM_BG: u64 = 2; pub struct BigTableClient { pub big_table_client: Arc, @@ -72,7 +72,8 @@ impl BlockProducer for BigTableClient { let mut block = match self.big_table_client.get_confirmed_block(slot).await { Ok(block) => block, Err(err) => { - error!("Error getting block: {}", err); + // as this will be retried we're logging as warn. If the error persists, it will be returned as error + warn!("Error getting block: {}, retrying", err); counter -= 1; if counter == 0 { return Err(StorageError::Common(format!( diff --git a/usecase/src/slots_collector.rs b/usecase/src/slots_collector.rs index 8b1651dd2..0b5ad5236 100644 --- a/usecase/src/slots_collector.rs +++ b/usecase/src/slots_collector.rs @@ -17,7 +17,7 @@ pub const GET_SIGNATURES_LIMIT: i64 = 2000; #[automock] #[async_trait] pub trait SlotsGetter { - async fn get_slots( + async fn get_slots_sorted_desc( &self, collected_key: &solana_program::pubkey::Pubkey, start_at: u64, @@ -27,7 +27,7 @@ pub trait SlotsGetter { #[async_trait] impl SlotsGetter for BigTableConnection { - async fn get_slots( + async fn get_slots_sorted_desc( &self, collected_key: &solana_program::pubkey::Pubkey, start_at: u64, @@ -110,7 +110,7 @@ where } let slots = self .row_keys_getter - .get_slots(collected_pubkey, start_at_slot, GET_SIGNATURES_LIMIT) + .get_slots_sorted_desc(collected_pubkey, start_at_slot, GET_SIGNATURES_LIMIT) .await; match slots { Ok(s) => {