Remove SQL for (most) Ledger State #4575

Merged
merged 4 commits into from Dec 18, 2024
13 changes: 0 additions & 13 deletions docs/stellar-core_example.cfg
@@ -229,14 +229,6 @@ FLOOD_DEMAND_BACKOFF_DELAY_MS = 500
# against each other.
MAX_DEX_TX_OPERATIONS_IN_TX_SET = 0

# DEPRECATED_SQL_LEDGER_STATE (bool) default false
# When set to true, SQL is used to store all ledger state instead of
# BucketListDB. This is not recommended and may cause performance degradation.
# This is deprecated and will be removed in the future. Note that offers table
# is still maintained in SQL when this is set to false, but all other ledger
# state tables are dropped.
DEPRECATED_SQL_LEDGER_STATE = false

# BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT (Integer) default 14
# Determines page size used by BucketListDB for range indexes, where
# pageSize == 2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT. If set to
@@ -258,11 +250,6 @@ BUCKETLIST_DB_INDEX_CUTOFF = 20
# this value is ignored and indexes are never persisted.
BUCKETLIST_DB_PERSIST_INDEX = true

# BACKGROUND_EVICTION_SCAN (bool) default true
# Determines whether eviction scans occur in the background thread. Requires
# that DEPRECATED_SQL_LEDGER_STATE is set to false.
BACKGROUND_EVICTION_SCAN = true

# EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING (bool) default false
# Determines whether some of overlay processing occurs in the background
# thread.
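The hunks above drop DEPRECATED_SQL_LEDGER_STATE and BACKGROUND_EVICTION_SCAN while keeping the BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT comment, which defines pageSize == 2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT. As a rough illustration of that relationship only (this is not stellar-core's code, and rangeIndexPageSize is an invented name), a sketch could look like:

#include <cstddef>
#include <cstdint>

// Hypothetical helper (illustration only): derive the range-index page size
// from BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT, i.e. pageSize == 2^exponent.
size_t
rangeIndexPageSize(uint32_t pageSizeExponent)
{
    // Assumption: an exponent of 0 means no range-index pages are used.
    if (pageSizeExponent == 0)
    {
        return 0;
    }
    return static_cast<size_t>(1) << pageSizeExponent;
}

// With the default exponent of 14, pages are 2^14 = 16384 bytes (16 KiB).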
1 change: 0 additions & 1 deletion docs/stellar-core_example_validators.cfg
@@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false
NETWORK_PASSPHRASE="Example configuration"

DATABASE="sqlite3://example.db"
DEPRECATED_SQL_LEDGER_STATE = false

NODE_SEED="SA7FGJMMUIHNE3ZPI2UO5I632A7O5FBAZTXFAIEVFA4DSSGLHXACLAIT a3"
NODE_HOME_DOMAIN="domainA"
1 change: 0 additions & 1 deletion docs/stellar-core_standalone.cfg
@@ -12,7 +12,6 @@ NODE_IS_VALIDATOR=true

#DATABASE="postgresql://dbname=stellar user=postgres password=password host=localhost"
DATABASE="sqlite3://stellar.db"
DEPRECATED_SQL_LEDGER_STATE = false

COMMANDS=["ll?level=debug"]

1 change: 0 additions & 1 deletion docs/stellar-core_testnet.cfg
@@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false
NETWORK_PASSPHRASE="Test SDF Network ; September 2015"

DATABASE="sqlite3://stellar.db"
DEPRECATED_SQL_LEDGER_STATE = false

# Stellar Testnet validators
[[HOME_DOMAINS]]
1 change: 0 additions & 1 deletion docs/stellar-core_testnet_legacy.cfg
@@ -9,7 +9,6 @@ KNOWN_PEERS=[
"core-testnet3.stellar.org"]

DATABASE="sqlite3://stellar.db"
DEPRECATED_SQL_LEDGER_STATE = false
UNSAFE_QUORUM=true
FAILURE_SAFETY=1

1 change: 0 additions & 1 deletion docs/stellar-core_testnet_validator.cfg
@@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false
NETWORK_PASSPHRASE="Test SDF Network ; September 2015"

DATABASE="sqlite3://stellar.db"
DEPRECATED_SQL_LEDGER_STATE = false

# Configuring the node as a validator
# note that this is an unsafe configuration in this particular setup:
118 changes: 44 additions & 74 deletions src/bucket/BucketApplicator.cpp
@@ -9,6 +9,7 @@
#include "ledger/LedgerTxn.h"
#include "ledger/LedgerTxnEntry.h"
#include "main/Application.h"
#include "util/GlobalChecks.h"
#include "util/Logging.h"
#include "util/types.h"
#include <fmt/format.h>
@@ -21,14 +22,12 @@ BucketApplicator::BucketApplicator(Application& app,
uint32_t minProtocolVersionSeen,
uint32_t level,
std::shared_ptr<LiveBucket const> bucket,
std::function<bool(LedgerEntryType)> filter,
std::unordered_set<LedgerKey>& seenKeys)
: mApp(app)
, mMaxProtocolVersion(maxProtocolVersion)
, mMinProtocolVersionSeen(minProtocolVersionSeen)
, mLevel(level)
, mBucketIter(bucket)
, mEntryTypeFilter(filter)
, mSeenKeys(seenKeys)
{
auto protocolVersion = mBucketIter.getMetadata().ledgerVersion;
@@ -40,8 +39,8 @@ BucketApplicator::BucketApplicator(Application& app,
protocolVersion, mMaxProtocolVersion));
}

// Only apply offers if BucketListDB is enabled
if (mApp.getConfig().isUsingBucketListDB() && !bucket->isEmpty())
// Only apply offers
if (!bucket->isEmpty())
{
auto offsetOp = bucket->getOfferRange();
if (offsetOp)
@@ -62,10 +61,8 @@ BucketApplicator::operator bool() const
{
// There is more work to do (i.e. (bool) *this == true) iff:
// 1. The underlying bucket iterator is not EOF and
// 2. Either BucketListDB is not enabled (so we must apply all entry types)
// or BucketListDB is enabled and we have offers still remaining.
return static_cast<bool>(mBucketIter) &&
(!mApp.getConfig().isUsingBucketListDB() || mOffersRemaining);
// 2. We have offers still remaining.
return static_cast<bool>(mBucketIter) && mOffersRemaining;
}

size_t
@@ -81,20 +78,19 @@ BucketApplicator::size() const
}

static bool
shouldApplyEntry(std::function<bool(LedgerEntryType)> const& filter,
BucketEntry const& e)
shouldApplyEntry(BucketEntry const& e)
{
if (e.type() == LIVEENTRY || e.type() == INITENTRY)
{
return filter(e.liveEntry().data.type());
return BucketIndex::typeNotSupported(e.liveEntry().data.type());
}

if (e.type() != DEADENTRY)
{
throw std::runtime_error(
"Malformed bucket: unexpected non-INIT/LIVE/DEAD entry.");
}
return filter(e.deadEntry().type());
return BucketIndex::typeNotSupported(e.deadEntry().type());
}

size_t
@@ -129,8 +125,7 @@ BucketApplicator::advance(BucketApplicator::Counters& counters)
// returns the file offset at the end of the currently loaded entry.
// This means we must read until pos is strictly greater than the upper
// bound so that we don't skip the last offer in the range.
auto isUsingBucketListDB = mApp.getConfig().isUsingBucketListDB();
if (isUsingBucketListDB && mBucketIter.pos() > mUpperBoundOffset)
if (mBucketIter.pos() > mUpperBoundOffset)
{
mOffersRemaining = false;
break;
@@ -139,89 +134,64 @@ BucketApplicator::advance(BucketApplicator::Counters& counters)
BucketEntry const& e = *mBucketIter;
LiveBucket::checkProtocolLegality(e, mMaxProtocolVersion);

if (shouldApplyEntry(mEntryTypeFilter, e))
if (shouldApplyEntry(e))
{
if (isUsingBucketListDB)
if (e.type() == LIVEENTRY || e.type() == INITENTRY)
{
if (e.type() == LIVEENTRY || e.type() == INITENTRY)
{
auto [_, wasInserted] =
mSeenKeys.emplace(LedgerEntryKey(e.liveEntry()));
auto [_, wasInserted] =
mSeenKeys.emplace(LedgerEntryKey(e.liveEntry()));

// Skip seen keys
if (!wasInserted)
{
continue;
}
}
else
// Skip seen keys
if (!wasInserted)
{
// Only apply INIT and LIVE entries
mSeenKeys.emplace(e.deadEntry());
continue;
}
}
else
{
// Only apply INIT and LIVE entries
mSeenKeys.emplace(e.deadEntry());
continue;
}

counters.mark(e);

if (e.type() == LIVEENTRY || e.type() == INITENTRY)
// DEAD and META entries skipped
releaseAssert(e.type() == LIVEENTRY || e.type() == INITENTRY);
// The last level can have live entries, but at that point we
// know that they are actually init entries because the earliest
// state of all entries is init, so we mark them as such here
if (mLevel == LiveBucketList::kNumLevels - 1 &&
e.type() == LIVEENTRY)
{
// The last level can have live entries, but at that point we
// know that they are actually init entries because the earliest
// state of all entries is init, so we mark them as such here
if (mLevel == LiveBucketList::kNumLevels - 1 &&
e.type() == LIVEENTRY)
{
ltx->createWithoutLoading(e.liveEntry());
}
else if (
protocolVersionIsBefore(
mMinProtocolVersionSeen,
LiveBucket::
FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
ltx->createWithoutLoading(e.liveEntry());
}
else if (protocolVersionIsBefore(
mMinProtocolVersionSeen,
LiveBucket::
FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
{
// Prior to protocol 11, INITENTRY didn't exist, so we need
// to check ltx to see if this is an update or a create
auto key = InternalLedgerEntry(e.liveEntry()).toKey();
if (ltx->getNewestVersion(key))
{
// Prior to protocol 11, INITENTRY didn't exist, so we need
// to check ltx to see if this is an update or a create
auto key = InternalLedgerEntry(e.liveEntry()).toKey();
if (ltx->getNewestVersion(key))
{
ltx->updateWithoutLoading(e.liveEntry());
}
else
{
ltx->createWithoutLoading(e.liveEntry());
}
ltx->updateWithoutLoading(e.liveEntry());
}
else
{
if (e.type() == LIVEENTRY)
{
ltx->updateWithoutLoading(e.liveEntry());
}
else
{
ltx->createWithoutLoading(e.liveEntry());
}
ltx->createWithoutLoading(e.liveEntry());
}
}
else
{
releaseAssertOrThrow(!isUsingBucketListDB);
if (protocolVersionIsBefore(
mMinProtocolVersionSeen,
LiveBucket::
FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
if (e.type() == LIVEENTRY)
{
// Prior to protocol 11, DEAD entries could exist
// without LIVE entries in between
if (ltx->getNewestVersion(e.deadEntry()))
{
ltx->eraseWithoutLoading(e.deadEntry());
}
ltx->updateWithoutLoading(e.liveEntry());
}
else
{
ltx->eraseWithoutLoading(e.deadEntry());
ltx->createWithoutLoading(e.liveEntry());
}
}

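The rewritten advance() loop above now applies only offer entries and relies on the shared mSeenKeys set: buckets are applied from newest to oldest, so only the newest version of each key is written, and keys shadowed by a newer DEAD entry are skipped entirely. The following stripped-down sketch shows that pattern with invented stand-in types (Entry, applyBucket) rather than stellar-core's BucketEntry and LedgerKey:

#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// Simplified stand-ins for BucketEntry / LedgerKey (illustration only).
enum class EntryType { LIVE, DEAD };
struct Entry
{
    EntryType type;
    std::string key; // stands in for LedgerEntryKey(e.liveEntry())
    int value = 0;   // stands in for the offer payload
};

// Apply one bucket's offers, newest bucket first across calls, skipping any
// key already seen in a newer bucket. DEAD entries only mark the key as seen.
void
applyBucket(std::vector<Entry> const& bucket,
            std::unordered_set<std::string>& seenKeys)
{
    for (auto const& e : bucket)
    {
        if (e.type == EntryType::DEAD)
        {
            seenKeys.insert(e.key); // shadow older LIVE versions of this key
            continue;
        }
        auto [_, wasInserted] = seenKeys.insert(e.key);
        if (!wasInserted)
        {
            continue; // a newer bucket already decided this key's fate
        }
        std::cout << "apply " << e.key << " = " << e.value << "\n";
    }
}

int
main()
{
    std::unordered_set<std::string> seen;
    applyBucket({{EntryType::DEAD, "offerA"}, {EntryType::LIVE, "offerB", 2}}, seen);
    applyBucket({{EntryType::LIVE, "offerA", 1}, {EntryType::LIVE, "offerB", 1}}, seen);
    // Prints only "apply offerB = 2": offerA is shadowed by the newer DEAD
    // entry, and the older version of offerB is skipped.
}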
2 changes: 0 additions & 2 deletions src/bucket/BucketApplicator.h
@@ -26,7 +26,6 @@ class BucketApplicator
uint32_t mLevel;
LiveBucketInputIterator mBucketIter;
size_t mCount{0};
std::function<bool(LedgerEntryType)> mEntryTypeFilter;
std::unordered_set<LedgerKey>& mSeenKeys;
std::streamoff mUpperBoundOffset{0};
bool mOffersRemaining{true};
@@ -73,7 +72,6 @@ class BucketApplicator
BucketApplicator(Application& app, uint32_t maxProtocolVersion,
uint32_t minProtocolVersionSeen, uint32_t level,
std::shared_ptr<LiveBucket const> bucket,
std::function<bool(LedgerEntryType)> filter,
std::unordered_set<LedgerKey>& seenKeys);
operator bool() const;
size_t advance(Counters& counters);
3 changes: 1 addition & 2 deletions src/bucket/BucketBase.cpp
@@ -397,8 +397,7 @@ BucketBase::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,

MergeKey mk{keepTombstoneEntries, oldBucket->getHash(),
newBucket->getHash(), shadowHashes};
return out.getBucket(bucketManager,
bucketManager.getConfig().isUsingBucketListDB(), &mk);
return out.getBucket(bucketManager, &mk);
}

template std::shared_ptr<LiveBucket> BucketBase::merge<LiveBucket>(
5 changes: 2 additions & 3 deletions src/bucket/BucketIndexImpl.cpp
@@ -239,7 +239,7 @@ BucketIndexImpl<IndexT>::BucketIndexImpl(BucketManager& bm,
ZoneValue(static_cast<int64_t>(count));
}

if (bm.getConfig().isPersistingBucketListDBIndexes())
if (bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX)
{
saveToDisk(bm, hash, ctx);
}
@@ -260,7 +260,7 @@ BucketIndexImpl<BucketIndex::RangeIndex>::saveToDisk(
BucketManager& bm, Hash const& hash, asio::io_context& ctx) const
{
ZoneScoped;
releaseAssert(bm.getConfig().isPersistingBucketListDBIndexes());
releaseAssert(bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX);
auto timer =
LogSlowExecution("Saving index", LogSlowExecution::Mode::AUTOMATIC_RAII,
"took", std::chrono::milliseconds(100));
@@ -381,7 +381,6 @@ BucketIndex::createIndex(BucketManager& bm,

ZoneScoped;
auto const& cfg = bm.getConfig();
releaseAssertOrThrow(cfg.isUsingBucketListDB());
releaseAssertOrThrow(!filename.empty());
auto pageSize = effectivePageSize(cfg, fs::size(filename.string()));

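The changes above replace isPersistingBucketListDBIndexes() with a direct read of BUCKETLIST_DB_PERSIST_INDEX and drop the isUsingBucketListDB() assertion in createIndex(). A plausible reading, stated here as an assumption rather than a quote of the old code, is that the helper previously combined the SQL-ledger-state flag with the persist flag and simply collapses to the raw flag once BucketListDB is the only ledger-state backend:

// Minimal stand-in Config for illustration; stellar-core's real Config has
// many more fields, and the helper bodies below are assumed, not quoted.
struct Config
{
    bool DEPRECATED_SQL_LEDGER_STATE = false;
    bool BUCKETLIST_DB_PERSIST_INDEX = true;

    bool
    isUsingBucketListDB() const
    {
        return !DEPRECATED_SQL_LEDGER_STATE;
    }
    bool
    isPersistingBucketListDBIndexes() const
    {
        return isUsingBucketListDB() && BUCKETLIST_DB_PERSIST_INDEX;
    }
};

// After this PR, DEPRECATED_SQL_LEDGER_STATE is gone, isUsingBucketListDB()
// would always be true, and callers read cfg.BUCKETLIST_DB_PERSIST_INDEX
// directly.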