Official release for Nitro v2.3.3 with Celestia DA #15

Merged · 20 commits · Jun 5, 2024
10 changes: 6 additions & 4 deletions .gitmodules
@@ -1,6 +1,7 @@
[submodule "go-ethereum"]
path = go-ethereum
url = https://github.com/OffchainLabs/go-ethereum.git
url = https://github.com/celestiaorg/go-ethereum.git
branch = celestia-v2.3.1
[submodule "fastcache"]
path = fastcache
url = https://github.com/OffchainLabs/fastcache.git
@@ -12,11 +13,12 @@
url = https://github.com/google/brotli.git
[submodule "contracts"]
path = contracts
url = https://github.com/OffchainLabs/nitro-contracts.git
branch = develop
url = https://github.com/celestiaorg/nitro-contracts.git
branch = contracts-v1.2.1
[submodule "arbitrator/wasm-testsuite/testsuite"]
path = arbitrator/wasm-testsuite/testsuite
url = https://github.com/WebAssembly/testsuite.git
[submodule "nitro-testnode"]
path = nitro-testnode
url = https://github.com/OffchainLabs/nitro-testnode.git
url = https://github.com/celestiaorg/nitro-testnode.git
branch = celestia-v2.3.1
10 changes: 6 additions & 4 deletions Dockerfile
@@ -40,9 +40,9 @@ WORKDIR /workspace
RUN apt-get update && apt-get install -y curl build-essential=12.9

FROM wasm-base as wasm-libs-builder
# clang / lld used by soft-float wasm
# clang / lld used by soft-float wasm
RUN apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1
# pinned rust 1.70.0
# pinned rust 1.70.0
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.70.0 --target x86_64-unknown-linux-gnu wasm32-unknown-unknown wasm32-wasi
COPY ./Makefile ./
COPY arbitrator/arbutil arbitrator/arbutil
@@ -54,7 +54,7 @@ FROM scratch as wasm-libs-export
COPY --from=wasm-libs-builder /workspace/ /

FROM wasm-base as wasm-bin-builder
# pinned go version
# pinned go version
RUN curl -L https://golang.org/dl/go1.20.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf -
COPY ./Makefile ./go.mod ./go.sum ./
COPY ./arbcompress ./arbcompress
@@ -66,6 +66,7 @@ COPY ./blsSignatures ./blsSignatures
COPY ./cmd/chaininfo ./cmd/chaininfo
COPY ./cmd/replay ./cmd/replay
COPY ./das/dastree ./das/dastree
COPY ./das/celestia ./das/celestia
COPY ./precompiles ./precompiles
COPY ./statetransfer ./statetransfer
COPY ./util ./util
@@ -78,6 +79,7 @@ COPY ./fastcache ./fastcache
COPY ./go-ethereum ./go-ethereum
COPY --from=brotli-wasm-export / target/
COPY --from=contracts-builder workspace/contracts/build/contracts/src/precompiles/ contracts/build/contracts/src/precompiles/
COPY --from=contracts-builder workspace/contracts/build/contracts/src/celestia/ contracts/build/contracts/src/celestia/
COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/
COPY --from=contracts-builder workspace/.make/ .make/
RUN PATH="$PATH:/usr/local/go/bin" NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-wasm-bin
@@ -103,7 +105,7 @@ WORKDIR /workspace
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get update && \
apt-get install -y make wget gpg software-properties-common zlib1g-dev \
libstdc++-11-dev wabt clang llvm-dev libclang-common-14-dev libpolly-14-dev
libstdc++-11-dev wabt clang llvm-dev libclang-common-14-dev libpolly-14-dev
COPY arbitrator/Cargo.* arbitrator/
COPY arbitrator/arbutil arbitrator/arbutil
COPY arbitrator/prover/Cargo.toml arbitrator/prover/
1 change: 1 addition & 0 deletions arbitrator/jit/src/syscall.rs
@@ -206,6 +206,7 @@ fn get_field(env: &mut WasmEnv, source: u32, field: &[u8]) -> GoValue {
}
None => GoValue::Null,
},
(PROCESS_ID, b"pid") => GoValue::Number(1.),
_ => {
let field = String::from_utf8_lossy(field);
eprintln!("Go trying to access unimplemented unknown JS value {source} field {field}");
8 changes: 8 additions & 0 deletions arbitrator/prover/src/lib.rs
@@ -351,3 +351,11 @@ pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine) -> RustByteArr
pub unsafe extern "C" fn arbitrator_free_proof(proof: RustByteArray) {
drop(Vec::from_raw_parts(proof.ptr, proof.len, proof.capacity))
}

#[no_mangle]
pub unsafe extern "C" fn arbitrator_get_opcode(mach: *mut Machine) -> u16 {
match (*mach).get_next_instruction() {
Some(instruction) => return instruction.opcode.repr(),
None => panic!("Failed to get next opcode for Machine"),
}
}
8 changes: 7 additions & 1 deletion arbitrator/prover/src/machine.rs
@@ -2154,7 +2154,6 @@ impl Machine {
);

// End next instruction proof, begin instruction specific serialization

if let Some(next_inst) = func.code.get(self.pc.inst()) {
if matches!(
next_inst.opcode,
@@ -2336,6 +2335,13 @@ impl Machine {
{
data.push(0); // inbox proof type
data.extend(msg_data);
match inbox_identifier {
InboxIdentifier::Sequencer => {
data.extend(msg_idx.to_be_bytes());
data.push(0x0);
}
InboxIdentifier::Delayed => data.push(0x1),
}
}
} else {
panic!("Should never ever get here");
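Note: the hunk above changes the inbox-proof layout. After the 0x00 inbox-proof tag and the raw message bytes, a sequencer-inbox proof now appends the big-endian message index followed by a 0x00 marker, while a delayed-inbox proof appends a single 0x01. The Go sketch below mirrors that byte layout for reference; only the layout comes from the diff, the names are illustrative and not part of this PR.

// Illustrative Go mirror of the inbox-proof suffix serialized above.
package proofsketch

import "encoding/binary"

type inboxIdentifier int

const (
	sequencerInbox inboxIdentifier = iota
	delayedInbox
)

// appendInboxProof writes the inbox-proof section: a 0x00 proof-type tag, the
// message bytes, then either the big-endian message index plus 0x00 (sequencer
// inbox) or a single 0x01 (delayed inbox), matching the Rust change above.
func appendInboxProof(data, msgData []byte, msgIdx uint64, id inboxIdentifier) []byte {
	data = append(data, 0x00) // inbox proof type
	data = append(data, msgData...)
	switch id {
	case sequencerInbox:
		data = binary.BigEndian.AppendUint64(data, msgIdx)
		data = append(data, 0x00)
	case delayedInbox:
		data = append(data, 0x01)
	}
	return data
}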
4 changes: 4 additions & 0 deletions arbitrator/wasm-libraries/go-stub/src/value.rs
@@ -172,6 +172,10 @@ pub unsafe fn get_field(source: u32, field: &[u8]) -> GoValue {
return GoValue::Null;
}
}
} else if source == PROCESS_ID {
if field == b"pid" {
return GoValue::Number(1.);
}
}

if let Some(source) = DynamicObjectPool::singleton().get(source).cloned() {
93 changes: 61 additions & 32 deletions arbnode/batch_poster.go
@@ -42,6 +42,7 @@ import (
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/cmd/genericconf"
"github.com/offchainlabs/nitro/das"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
"github.com/offchainlabs/nitro/util"
@@ -89,6 +90,7 @@ type BatchPoster struct {
gasRefunderAddr common.Address
building *buildingBatch
daWriter das.DataAvailabilityServiceWriter
celestiaWriter celestiaTypes.DataAvailabilityWriter
dataPoster *dataposter.DataPoster
redisLock *redislock.Simple
messagesPerBatch *arbmath.MovingAverage[uint64]
@@ -118,8 +120,11 @@ const (
)

type BatchPosterConfig struct {
Enable bool `koanf:"enable"`
DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"`
Enable bool `koanf:"enable"`
// TODO (Diego) rework the 3 configs below once unified writer interface is in
DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"`
DisableCelestiaFallbackStoreDataOnChain bool `koanf:"disable-celestia-fallback-store-data-on-chain" reload:"hot"`
DisableCelestiaFallbackStoreDataOnDAS bool `koanf:"disable-celestia-fallback-store-data-on-das" reload:"hot"`
// Max batch size.
MaxSize int `koanf:"max-size" reload:"hot"`
// Maximum 4844 blob enabled batch size.
@@ -259,17 +264,18 @@ var TestBatchPosterConfig = BatchPosterConfig{
}

type BatchPosterOpts struct {
DataPosterDB ethdb.Database
L1Reader *headerreader.HeaderReader
Inbox *InboxTracker
Streamer *TransactionStreamer
VersionGetter execution.FullExecutionClient
SyncMonitor *SyncMonitor
Config BatchPosterConfigFetcher
DeployInfo *chaininfo.RollupAddresses
TransactOpts *bind.TransactOpts
DAWriter das.DataAvailabilityServiceWriter
ParentChainID *big.Int
DataPosterDB ethdb.Database
L1Reader *headerreader.HeaderReader
Inbox *InboxTracker
Streamer *TransactionStreamer
VersionGetter execution.FullExecutionClient
SyncMonitor *SyncMonitor
Config BatchPosterConfigFetcher
DeployInfo *chaininfo.RollupAddresses
TransactOpts *bind.TransactOpts
DAWriter das.DataAvailabilityServiceWriter
CelestiaWriter celestiaTypes.DataAvailabilityWriter
ParentChainID *big.Int
}

func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, error) {
@@ -315,6 +321,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e
gasRefunderAddr: opts.Config().gasRefunder,
bridgeAddr: opts.DeployInfo.Bridge,
daWriter: opts.DAWriter,
celestiaWriter: opts.CelestiaWriter,
redisLock: redisLock,
}
b.messagesPerBatch, err = arbmath.NewMovingAverage[uint64](20)
@@ -1194,29 +1201,51 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
return false, nil
}

if b.daWriter != nil {
if !b.redisLock.AttemptLock(ctx) {
return false, errAttemptLockFailed
}

gotNonce, gotMeta, err := b.dataPoster.GetNextNonceAndMeta(ctx)
if b.celestiaWriter != nil {
celestiaMsg, err := b.celestiaWriter.Store(ctx, sequencerMsg)
if err != nil {
return false, err
}
if nonce != gotNonce || !bytes.Equal(batchPositionBytes, gotMeta) {
return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce)
}
if config.DisableCelestiaFallbackStoreDataOnChain && config.DisableCelestiaFallbackStoreDataOnDAS {
return false, errors.New("unable to post batch to Celestia and fallback storing data on chain and das is disabled")
}
if config.DisableCelestiaFallbackStoreDataOnDAS {
log.Warn("Falling back to storing data on chain ", "err", err)
} else {
log.Warn("Falling back to storing data on DAC ", "err", err)

cert, err := b.daWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}) // b.daWriter will append signature if enabled
if errors.Is(err, das.BatchToDasFailed) {
if config.DisableDasFallbackStoreDataOnChain {
return false, errors.New("unable to batch to DAS and fallback storing data on chain is disabled")
}
log.Warn("Falling back to storing data on chain", "err", err)
} else if err != nil {
return false, err

// We nest the Anytrust logic here for now, as using this fork likely means your primary DA is Celestia
// and the Anytrust DAC is instead used as a fallback
if b.daWriter != nil {
if config.DisableCelestiaFallbackStoreDataOnDAS {
return false, errors.New("found Celestia DA enabled and DAS, but fallbacks to DAS are disabled")
}
if !b.redisLock.AttemptLock(ctx) {
return false, errAttemptLockFailed
}

gotNonce, gotMeta, err := b.dataPoster.GetNextNonceAndMeta(ctx)
if err != nil {
return false, err
}
if nonce != gotNonce || !bytes.Equal(batchPositionBytes, gotMeta) {
return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce)
}

cert, err := b.daWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}) // b.daWriter will append signature if enabled
if errors.Is(err, das.BatchToDasFailed) {
if config.DisableDasFallbackStoreDataOnChain {
return false, errors.New("unable to batch to DAS and fallback storing data on chain is disabled")
}
log.Warn("Falling back to storing data on chain", "err", err)
} else if err != nil {
return false, err
} else {
sequencerMsg = das.Serialize(cert)
}
}
} else {
sequencerMsg = das.Serialize(cert)
sequencerMsg = celestiaMsg
}
}

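Because the hunk above interleaves removed and added lines, here is a condensed reading of the new posting order: Celestia first, then the Anytrust DAC (unless disabled), then calldata on the parent chain. The helper below is an illustrative summary of that decision, not code from this PR; only the flag names come from the new BatchPosterConfig fields.

// Illustrative summary of where a batch lands after this change.
package postersketch

import "errors"

// fallbackFlags is a trimmed view of BatchPosterConfig holding only the
// fallback switches the sketch needs.
type fallbackFlags struct {
	DisableCelestiaFallbackStoreDataOnChain bool
	DisableCelestiaFallbackStoreDataOnDAS   bool
	DisableDasFallbackStoreDataOnChain      bool
}

// batchDestination reports where a batch ends up when Celestia is the primary
// DA: "celestia" on success, otherwise the DAC (if configured and allowed),
// otherwise parent-chain calldata, or an error when every fallback is disabled.
func batchDestination(celestiaOK, dasConfigured, dasOK bool, f fallbackFlags) (string, error) {
	if celestiaOK {
		return "celestia", nil
	}
	if f.DisableCelestiaFallbackStoreDataOnChain && f.DisableCelestiaFallbackStoreDataOnDAS {
		return "", errors.New("celestia store failed and both fallbacks are disabled")
	}
	if dasConfigured && !f.DisableCelestiaFallbackStoreDataOnDAS {
		if dasOK {
			return "das", nil
		}
		if f.DisableDasFallbackStoreDataOnChain {
			return "", errors.New("DAS store failed and on-chain fallback is disabled")
		}
	}
	return "chain", nil
}

With all flags left false, a failed Celestia store falls through to the DAC, and a failed DAC store falls through to calldata, matching the warnings logged in the diff.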
2 changes: 1 addition & 1 deletion arbnode/delayed_seq_reorg_test.go
@@ -19,7 +19,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) {
defer cancel()

exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{})
tracker, err := NewInboxTracker(db, streamer, nil, nil)
tracker, err := NewInboxTracker(db, streamer, nil, nil, nil)
Require(t, err)

err = streamer.Start(ctx)
8 changes: 7 additions & 1 deletion arbnode/inbox_tracker.go
@@ -39,13 +39,15 @@ type InboxTracker struct {
validator *staker.BlockValidator
das arbstate.DataAvailabilityReader
blobReader arbstate.BlobReader
celestia arbstate.CelestiaDataAvailabilityReader

batchMetaMutex sync.Mutex
batchMeta *containers.LruCache[uint64, BatchMetadata]
}

func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader, blobReader arbstate.BlobReader) (*InboxTracker, error) {
func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader, blobReader arbstate.BlobReader, celestia arbstate.CelestiaDataAvailabilityReader) (*InboxTracker, error) {
// We support a nil txStreamer for the pruning code
// TODO (DIEGO) Might be good to also change the configs to just support a param for "DA Service"
if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && das == nil {
return nil, errors.New("data availability service required but unconfigured")
}
@@ -54,6 +56,7 @@
txStreamer: txStreamer,
das: das,
blobReader: blobReader,
celestia: celestia,
batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000),
}
return tracker, nil
@@ -613,6 +616,9 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L
if t.blobReader != nil {
daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(t.blobReader))
}
if t.celestia != nil {
daProviders = append(daProviders, arbstate.NewDAProviderCelestia(t.celestia))
}
multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, arbstate.KeysetValidate)
batchMessageCounts := make(map[uint64]arbutil.MessageIndex)
currentpos := prevbatchmeta.MessageCount + 1
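On the read path, the Celestia reader registered above becomes one more DA provider next to the DAS and blob readers, and the inbox multiplexer hands each batch to whichever provider recognizes its header byte. The sketch below shows that provider pattern; the interface is an illustrative stand-in, not the actual arbstate definition.

// Illustrative stand-in for the DA-provider pattern used by the multiplexer.
package readersketch

import "context"

// daProvider is an assumed shape: a provider claims batches whose header byte
// it recognizes and recovers the underlying payload from its DA layer.
type daProvider interface {
	IsValidHeaderByte(headerByte byte) bool
	RecoverPayload(ctx context.Context, batchData []byte) ([]byte, error)
}

// recoverBatch walks the providers in registration order (DAS, blobs,
// Celestia) and lets the first match recover the payload; a batch no provider
// claims is treated as plain calldata.
func recoverBatch(ctx context.Context, batchData []byte, providers []daProvider) ([]byte, error) {
	if len(batchData) == 0 {
		return batchData, nil
	}
	for _, p := range providers {
		if p.IsValidHeaderByte(batchData[0]) {
			return p.RecoverPayload(ctx, batchData)
		}
	}
	return batchData, nil // no provider claimed it: plain calldata batch
}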
42 changes: 30 additions & 12 deletions arbnode/node.go
@@ -33,6 +33,8 @@ import (
"github.com/offchainlabs/nitro/broadcaster"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/das"
"github.com/offchainlabs/nitro/das/celestia"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
@@ -92,6 +94,7 @@ type Config struct {
TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"`
Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"`
ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"`
Celestia celestia.DAConfig `koanf:"celestia-cfg"`
}

func (c *Config) Validate() error {
Expand Down Expand Up @@ -510,6 +513,8 @@ func createNodeImpl(
var daWriter das.DataAvailabilityServiceWriter
var daReader das.DataAvailabilityServiceReader
var dasLifecycleManager *das.LifecycleManager
var celestiaReader celestiaTypes.DataAvailabilityReader
var celestiaWriter celestiaTypes.DataAvailabilityWriter
if config.DataAvailability.Enable {
if config.BatchPoster.Enable {
daWriter, daReader, dasLifecycleManager, err = das.CreateBatchPosterDAS(ctx, &config.DataAvailability, dataSigner, l1client, deployInfo.SequencerInbox)
@@ -535,7 +540,17 @@
return nil, errors.New("a data availability service is required for this chain, but it was not configured")
}

inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader)
if config.Celestia.Enable {
celestiaService, err := celestia.NewCelestiaDA(&config.Celestia, nil)
if err != nil {
return nil, err
}

celestiaReader = celestiaService
celestiaWriter = celestiaService
}

inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader, celestiaReader)
if err != nil {
return nil, err
}
@@ -547,6 +562,7 @@

var statelessBlockValidator *staker.StatelessBlockValidator
if config.BlockValidator.ValidationServerConfigs[0].URL != "" {
// pass blobstream address and L1 connection
statelessBlockValidator, err = staker.NewStatelessBlockValidator(
inboxReader,
inboxTracker,
@@ -555,6 +571,7 @@
rawdb.NewTable(arbDb, storage.BlockValidatorPrefix),
daReader,
blobReader,
celestiaReader,
func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator },
stack,
)
@@ -662,17 +679,18 @@
return nil, errors.New("batchposter, but no TxOpts")
}
batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{
DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix),
L1Reader: l1Reader,
Inbox: inboxTracker,
Streamer: txStreamer,
VersionGetter: exec,
SyncMonitor: syncMonitor,
Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster },
DeployInfo: deployInfo,
TransactOpts: txOptsBatchPoster,
DAWriter: daWriter,
ParentChainID: parentChainID,
DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix),
L1Reader: l1Reader,
Inbox: inboxTracker,
Streamer: txStreamer,
VersionGetter: exec,
SyncMonitor: syncMonitor,
Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster },
DeployInfo: deployInfo,
TransactOpts: txOptsBatchPoster,
DAWriter: daWriter,
CelestiaWriter: celestiaWriter,
ParentChainID: parentChainID,
})
if err != nil {
return nil, err
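Taken together, node.go wires a single Celestia service into three consumers: the batch poster takes it as a writer, while the inbox tracker and the stateless block validator take it as a reader. The writer's Store shape is fixed by the call in batch_poster.go; the reader method below is an assumption sketched for illustration only.

// Assumed minimal method sets for the celestiaTypes interfaces used above.
package wiringsketch

import "context"

// DataAvailabilityWriter matches the Store call in batch_poster.go: it posts a
// sequencer batch to Celestia and returns the serialized pointer that replaces
// the batch data in the sequencer inbox message.
type DataAvailabilityWriter interface {
	Store(ctx context.Context, message []byte) ([]byte, error)
}

// DataAvailabilityReader is an assumed counterpart: it resolves such a pointer
// back into the original batch payload for the inbox tracker and the block
// validator. The exact signature is a guess, not taken from this PR.
type DataAvailabilityReader interface {
	Read(ctx context.Context, pointer []byte) ([]byte, error)
}

// A single value returned by celestia.NewCelestiaDA is expected to satisfy
// both interfaces, which is why createNodeImpl assigns the same service to
// celestiaReader and celestiaWriter.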