Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add env feature for clap #314

Open
wants to merge 5 commits into
base: feature/MTG-868-slots-storage
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
130 changes: 16 additions & 114 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,120 +1,22 @@
# Required by Postgre container
POSTGRE_DB_PATH="postgre/db/path"
ASSETS_ROCKS_DB_PATH="path/to/assets/db"
ASSETS_ROCKS_DB_SECONDARY_PATH="path/to/assets_secondary/db"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This one will most likely be per-service, as it's a secondary path. Sometimes we could use a temp dir for it, especially if we expect it to grow during runtime.


RUST_BACKTRACE=1
# Ingester instance config
INGESTER_LOG_LEVEL=info
SLOTS_ROCKS_DB_PATH="path/to/slots/db"
SLOTS_ROCKS_DB_SECONDARY_PATH="path/to/slots_secondary/db"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same here


INGESTER_DATABASE_CONFIG='{max_postgres_connections=10, url="postgres://user:[email protected]:5432/database"}'
INGESTER_TCP_CONFIG='{receiver_addr="localhost:2000", receiver_reconnect_interval=5, snapshot_receiver_addr="localhost:5000"}'
INGESTER_REDIS_MESSENGER_CONFIG='{messenger_type="Redis", connection_config={redis_connection_str="redis://:pass@localhost:6379"}}'
INGESTER_MESSAGE_SOURCE=Redis #TCP or Redis
SOLANA_RPC="http://localhost:8080"

INGESTER_ACCOUNTS_BUFFER_SIZE=250
INGESTER_ACCOUNTS_PARSING_WORKERS=20
INGESTER_TRANSACTIONS_PARSING_WORKERS=20
BIG_TABLE_CREDENTIALS="path/to/creds"
BIG_TABLE_TIMEOUT=10

INGESTER_SNAPSHOT_PARSING_WORKERS=1
INGESTER_SNAPSHOT_PARSING_BATCH_SIZE=250
SLOT_PERSISTER_START_SLOT=10
SLOT_PERSISTER_SLOTS=[1,2,3]
SLOT_PERSISTER_CHUNK_SIZE=1000
SLOT_PERSISTER_MAX_CONCURRENCY=100
SLOT_PERSISTER_METRICS_PORT=6090

INGESTER_GAPFILLER_PEER_ADDR="0.0.0.0"
INGESTER_METRICS_PORT=9091
INGESTER_SERVER_PORT=9092
INGESTER_PEER_GRPC_PORT=9099
SLOT_CHECKER_SLOTS=[1,2,3]

INGESTER_ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data"
INGESTER_ROCKS_DB_PATH="path/to/rocks/on/disk"

INGESTER_ARCHIVES_DIR="path/to/rocks/backup/archives"
INGESTER_ROCKS_BACKUP_ARCHIVES_DIR="path/to/rocks/backup/archives"
INGESTER_ROCKS_BACKUP_DIR="path/to/rocks/backup/"

INGESTER_BACKFILL_RPC_ADDRESS='https://rpc:port'
INGESTER_RPC_HOST='https://rpc:port'

INGESTER_BACKFILLER_SOURCE_MODE=RPC #RPC or Bigtable
INGESTER_BIG_TABLE_CONFIG='{creds="/usr/src/app/creds.json", timeout=1000}'

INGESTER_RUN_SEQUENCE_CONSISTENT_CHECKER=true
# Optional; required only if the fork cleaner needs to run (default: false). Unstable: it removes forked items but may also remove some valid leaves. Recommended for testing purposes only!
INGESTER_RUN_FORK_CLEANER=false
INGESTER_RUN_BUBBLEGUM_BACKFILLER=true

INGESTER_BACKFILLER_MODE=PersistAndIngest
INGESTER_SLOT_UNTIL=0
INGESTER_SLOT_START_FROM=0
INGESTER_WORKERS_COUNT=100
INGESTER_CHUNK_SIZE=20
INGESTER_PERMITTED_TASKS=1
INGESTER_WAIT_PERIOD_SEC=30
INGESTER_SHOULD_REINGEST=false

INGESTER_PEER_GRPC_MAX_GAP_SLOTS=1000000

INGESTER_RUN_PROFILING=false
INGESTER_PROFILING_FILE_PATH_CONTAINER="/usr/src/profiling"
INGESTER_PROFILING_FILE_PATH="/path/to/profiling"

INGESTER_FILE_STORAGE_PATH_CONTAINER="/usr/src/app/file_storage"
INGESTER_FILE_STORAGE_PATH="path/to/file/storage"
INGESTER_MIGRATION_STORAGE_PATH=/path/to/migration_storage

INGESTER_ROCKS_FLUSH_BEFORE_BACKUP=false
INGESTER_ROCKS_INTERVAL_IN_SECONDS=3600
INGESTER_ROCKS_SYNC_INTERVAL_SECONDS=2

INGESTER_SYNCHRONIZER_DUMP_PATH="/path/to/dump"

# API instance config
API_LOG_LEVEL=info

API_DATABASE_CONFIG='{max_postgres_connections=250, url="postgres://user:[email protected]:5432/database"}'

API_ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data"
API_ROCKS_DB_SECONDARY_PATH_CONTAINER="path/to/rocks/secondary/db"
API_ARCHIVES_DIR="path/to/rocks/backup/archives"

API_PEER_GRPC_PORT=8991
API_METRICS_PORT=8985
API_SERVER_PORT=8990

API_RPC_HOST='https://rpc:port'

API_ROCKS_SYNC_INTERVAL_SECONDS=2
API_FILE_STORAGE_PATH_CONTAINER="/usr/src/app/file_storage"
API_FILE_STORAGE_PATH="path/to/file/storage"

API_PEER_GRPC_MAX_GAP_SLOTS=1000000
API_JSON_MIDDLEWARE_CONFIG='{is_enabled=true, max_urls_to_parse=10}'

API_CONSISTENCE_SYNCHRONIZATION_API_THRESHOLD=1000000
API_CONSISTENCE_BACKFILLING_SLOTS_THRESHOLD=500

# If set to true, the API will not check whether the tree from which the user requests assets has any gaps
API_SKIP_CHECK_TREE_GAPS=false

# Synchronizer instance config
SYNCHRONIZER_LOG_LEVEL=info

SYNCHRONIZER_DATABASE_CONFIG='{max_postgres_connections=100, url="postgres://user:[email protected]:5432/database"}'
SYNCHRONIZER_ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data"
SYNCHRONIZER_ROCKS_DB_SECONDARY_PATH_CONTAINER="path/to/rocks/secondary/db"

SYNCHRONIZER_METRICS_PORT=6091

SYNCHRONIZER_DUMP_PATH="/path/to/migration_data"

SYNCHRONIZER_DUMP_SYNCHRONIZER_BATCH_SIZE=10000
SYNCHRONIZER_DUMP_SYNC_THRESHOLD=50000000

SYNCHRONIZER_PARALLEL_TASKS=30

# Profiling config
# Optional, required only if it needs to run memory profiling
MALLOC_CONF="prof:true,prof_leak:true,prof_final:true,prof_active:true,prof_prefix:/usr/src/app/heaps/,lg_prof_interval:32,lg_prof_sample:19"

# Integrity verification
INTEGRITY_VERIFICATION_TEST_FILE_PATH="./test_keys/test_keys.txt"
INTEGRITY_VERIFICATION_TEST_FILE_PATH_CONTAINER="/test_keys/test_keys.txt"
INTEGRITY_VERIFICATION_SLOTS_COLLECT_PATH="./slots_collect"
INTEGRITY_VERIFICATION_SLOTS_COLLECT_PATH_CONTAINER="/slots_collect"
BACKFILL_START_SLOT=10
BACKFILL_WORKERS=50
BACKFILL_SLOTS=[1,2,3]
18 changes: 9 additions & 9 deletions nft_ingester/src/bin/slot_persister/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,40 +41,40 @@ const SLOT_COLLECTION_OFFSET: u64 = 300;
)]
/// CLI arguments for the slot persister binary.
///
/// Each flag can also be supplied through the named environment variable,
/// matching the keys declared in `.env.example`.
struct Args {
    /// Path to the target RocksDB instance with slots
    // NOTE(review): env var ASSETS_ROCKS_DB_PATH looks inconsistent for a
    // slots DB — .env.example defines SLOTS_ROCKS_DB_PATH; confirm intent.
    #[arg(short, long, env = "ASSETS_ROCKS_DB_PATH")]
    target_db_path: PathBuf,

    /// RPC host
    #[arg(short, long, env = "SOLANA_RPC")]
    rpc_host: String,

    /// Optional starting slot number, this will override the last saved slot in the RocksDB
    #[arg(short, long, env = "SLOT_PERSISTER_START_SLOT")]
    start_slot: Option<u64>,

    /// Big table credentials file path
    #[arg(short, long, env = "BIG_TABLE_CREDENTIALS")]
    big_table_credentials: Option<String>,

    /// Optional big table timeout (default: 1000)
    // Short flag is capital 'B' to avoid clashing with -b (big_table_credentials).
    #[arg(short = 'B', long, env = "BIG_TABLE_TIMEOUT", default_value_t = 1000)]
    big_table_timeout: u32,

    /// Metrics port
    /// Default: 9090
    #[arg(short, long, env = "SLOT_PERSISTER_METRICS_PORT", default_value = "9090")]
    metrics_port: u16,

    /// Number of slots to process in each batch
    #[arg(short, long, env = "SLOT_PERSISTER_CHUNK_SIZE", default_value_t = 200)]
    chunk_size: usize,

    /// Maximum number of concurrent requests
    // Short flag is capital 'M' to avoid clashing with -m (metrics_port).
    #[arg(short = 'M', long, env = "SLOT_PERSISTER_MAX_CONCURRENCY", default_value_t = 20)]
    max_concurrency: usize,

    /// Optional comma-separated list of slot numbers to check
    #[arg(long, env = "SLOT_PERSISTER_SLOTS")]
    slots: Option<String>,
}
pub struct InMemorySlotsDumper {
Expand Down