be4665cb (main) and 67bf0ed9 (PR)
+28 -28
+2 -2
{
"capability": "universal",
"daLayer": [
"DAC",
"Espresso"
"Espresso",
"DAC"
],
"hostChain": {
"id": "arbitrum",
"slug": "arbitrum",
"name": "Arbitrum One"
},
"layer": "layer3",
"proofSystem": {
"type": "Optimistic"
},
"purposes": [
"Universal"
],
"raas": "Caldera",
"reasonsForBeingOther": [
{
"label": "Closed proofs",
"shortDescription": "There are less than 5 external actors that can submit challenges",
"description": "Projects without a sufficiently decentralized set of challengers rely on few entities to safely update the state. A small set of challengers can collude with the proposer to finalize an invalid state, which can cause loss of funds."
},
{
"label": "Small DAC",
"shortDescription": "There are less than 5 external actors that can attest data availability",
"description": "Projects without a sufficiently decentralized data availability committee rely on few entities to safely attest data availability on Ethereum. A small set of entities can collude with the proposer to finalize an unavailable state, which can cause loss of funds."
}
],
"stacks": [
"Arbitrum"
],
"stage": "Not applicable",
"type": "Other",
"vm": [
"EVM"
]
}
+11 -11
[
{
"layer": {
"value": "DAC",
"value": "Espresso",
"sentiment": "warning",
"description": "The data is posted off chain and a Data Availability Committee (DAC) is responsible for protecting and supplying it."
"description": "The data is posted to Espresso.",
"projectId": "espresso"
},
"bridge": {
"value": "5/7 DAC Members",
"value": "None",
"sentiment": "bad",
"description": "There is a threshold of 5/7 members that must sign and attest that the data is correct and available.",
"orderHint": -2.1e-9
"description": "There is no bridge that can attest if the data has been made available.",
"orderHint": -2
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
},
{
"layer": {
"value": "Espresso",
"value": "DAC",
"sentiment": "warning",
"description": "The data is posted to Espresso.",
"projectId": "espresso"
"description": "The data is posted off chain and a Data Availability Committee (DAC) is responsible for protecting and supplying it."
},
"bridge": {
"value": "None",
"value": "5/7 DAC Members",
"sentiment": "bad",
"description": "There is no bridge that can attest if the data has been made available.",
"orderHint": -2
"description": "There is a threshold of 5/7 members that must sign and attest that the data is correct and available.",
"orderHint": -2.1e-9
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
}
]
+15 -15
{
"architectureImage": "orbit-optimium",
"dataAvailability": [
{
"name": "Data is not stored on chain",
"description": "Users transactions are not published onchain, but rather sent to external trusted parties, also known as committee members (DAC). Members of the DAC collectively produce a Data Availability Certificate (comprising BLS signatures from a quorum) guaranteeing that the data behind the new transaction batch will be available until the expiry period elapses (currently a minimum of two weeks). This signature is not verified by L1, however external Validators will skip the batch if BLS signature is not valid resulting. This will result in a fraud proof challenge if this batch is included in a consecutive state update. It is assumed that at least one honest DAC member that signed the batch will reveal tx data to the Validators if Sequencer decides to act maliciously and withhold the data. If the Sequencer cannot gather enough signatures from the DAC, it will \"fall back to rollup\" mode and by posting the full data directly to the L1 chain. The current DAC threshold is 5 out of 7.",
"name": "Data is posted to Espresso",
"description": "Transactions roots are posted onchain and the full data is posted on Espresso. Since the HotShot Light Client contract is not used, availability of the data is not verified against Espresso validators, meaning that the Sequencer can single-handedly publish unavailable roots.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the external data becomes unavailable.",
"text": "the sequencer posts an unavailable transaction root.",
"isCritical": true
},
{
"category": "Users can be censored if",
"text": "the committee restricts their access to the external data."
"category": "Funds can be lost if",
"text": "the data is not available on the external provider.",
"isCritical": true
}
],
"references": [
{
"title": "Inside AnyTrust - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/inside-anytrust"
"title": "Espresso Light Client",
"url": "https://docs.espressosys.com/network/learn/the-espresso-network/internal-functionality/light-client"
}
]
},
{
"name": "Data is posted to Espresso",
"description": "Transactions roots are posted onchain and the full data is posted on Espresso. Since the HotShot Light Client contract is not used, availability of the data is not verified against Espresso validators, meaning that the Sequencer can single-handedly publish unavailable roots.",
"name": "Data is not stored on chain",
"description": "Users transactions are not published onchain, but rather sent to external trusted parties, also known as committee members (DAC). Members of the DAC collectively produce a Data Availability Certificate (comprising BLS signatures from a quorum) guaranteeing that the data behind the new transaction batch will be available until the expiry period elapses (currently a minimum of two weeks). This signature is not verified by L1, however external Validators will skip the batch if BLS signature is not valid resulting. This will result in a fraud proof challenge if this batch is included in a consecutive state update. It is assumed that at least one honest DAC member that signed the batch will reveal tx data to the Validators if Sequencer decides to act maliciously and withhold the data. If the Sequencer cannot gather enough signatures from the DAC, it will \"fall back to rollup\" mode and by posting the full data directly to the L1 chain. The current DAC threshold is 5 out of 7.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the sequencer posts an unavailable transaction root.",
"text": "the external data becomes unavailable.",
"isCritical": true
},
{
"category": "Funds can be lost if",
"text": "the data is not available on the external provider.",
"isCritical": true
"category": "Users can be censored if",
"text": "the committee restricts their access to the external data."
}
],
"references": [
{
"title": "Espresso Light Client",
"url": "https://docs.espressosys.com/network/learn/the-espresso-network/internal-functionality/light-client"
"title": "Inside AnyTrust - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/inside-anytrust"
}
]
}
],
"exitMechanisms": [
{
"name": "Regular messaging",
"description": "The user initiates L2->L1 messages by submitting a regular transaction on this chain. When the block containing that transaction is settled, the message becomes available for processing on L1. The process of block finalization usually takes several days to complete.",
"risks": [],
"references": [
{
"title": "Transaction lifecycle - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/tx-lifecycle"
},
{
"title": "L2 to L1 Messages - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/deep-dives/l2-to-l1-messaging"
},
{
"title": "Mainnet for everyone - Arbitrum Blog",
"url": "https://offchain.medium.com/mainnet-for-everyone-27ce0f67c85e"
}
]
},
{
"name": "Autonomous exit",
"description": "Users can (eventually) exit the system by pushing the transaction on L1 and providing the corresponding state root. The only way to prevent such withdrawal is via an upgrade.",
"risks": [],
"references": []
}
],
"forceTransactions": {
"name": "Users can force any transaction",
"description": "Because the state of the system is based on transactions submitted on the underlying host chain and anyone can submit their transactions there it allows the users to circumvent censorship by interacting with the smart contract on the host chain directly. After a delay of 3d in which a Sequencer has failed to include a transaction that was directly posted to the smart contract, it can be forcefully included by anyone on the host chain, which finalizes its ordering.",
"risks": [],
"references": [
{
"title": "SequencerInbox.sol - source code, forceInclusion function",
"url": "https://arbiscan.io/address/0xCfAfB803EF1FEc576138Cebc79Ad41Aa6760C575#code"
},
{
"title": "Sequencer Isn't Doing Its Job - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/sequencer#unhappyuncommon-case-sequencer-isnt-doing-its-job"
}
]
},
"operator": {
"name": "The system has a centralized sequencer",
"description": "While forcing transaction is open to anyone the system employs a privileged sequencer that has priority for submitting transaction batches and ordering transactions.",
"risks": [
{
"category": "MEV can be extracted if",
"text": "the operator exploits their centralized position and frontruns user transactions."
}
],
"references": [
{
"title": "Sequencer - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/inside-arbitrum-nitro#the-sequencer"
}
]
},
"otherConsiderations": [
{
"name": "EVM compatible smart contracts are supported",
"description": "Arbitrum One uses Nitro technology that allows running fraud proofs by executing EVM code on top of WASM.",
"risks": [],
"references": [
{
"title": "Inside Arbitrum Nitro",
"url": "https://developer.offchainlabs.com/inside-arbitrum-nitro/"
}
]
}
],
"sequencing": {
"name": "Delayed forced transactions",
"description": "To force transactions from the host chain, users must first enqueue \"delayed\" messages in the \"delayed\" inbox of the Bridge contract. Only authorized Inboxes are allowed to enqueue delayed messages, and the so-called Inbox contract is the one used as the entry point by calling the `sendMessage` or `sendMessageFromOrigin` functions. If the centralized sequencer doesn't process the request within some time bound, users can call the `forceInclusion` function on the SequencerInbox contract to include the message in the canonical chain. The time bound is hardcoded to be 3d.",
"references": [],
"risks": []
},
"stateValidation": {
"description": "Updates to the system state can be proposed and challenged by a set of whitelisted validators. If a state root passes the challenge period, it is optimistically considered correct and made actionable for withdrawals.",
"categories": [
{
"title": "State root proposals",
"description": "Whitelisted validators propose state roots as children of a previous state root. A state root can have multiple conflicting children. This structure forms a graph, and therefore, in the contracts, state roots are referred to as nodes. Each proposal requires a stake, currently set to 0.1 ETH, that can be slashed if the proposal is proven incorrect via a fraud proof. Stakes can be moved from one node to one of its children, either by calling `stakeOnExistingNode` or `stakeOnNewNode`. New nodes cannot be created faster than the minimum assertion period by the same validator, currently set to 15m. The oldest unconfirmed node can be confirmed if the challenge period has passed and there are no siblings, and rejected if the parent is not a confirmed node or if the challenge period has passed and no one is staked on it.",
"risks": [
{
"category": "Funds can be stolen if",
"text": "none of the whitelisted verifiers checks the published state. Fraud proofs assume at least one honest and able validator.",
"isCritical": true
}
],
"references": [
{
"title": "How is fraud proven - Arbitrum documentation FAQ",
"url": "https://docs.arbitrum.io/get-started/arbitrum-introduction"
}
]
},
{
"title": "Challenges",
"description": "A challenge can be started between two siblings, i.e. two different state roots that share the same parent, by calling the `startChallenge` function. Validators cannot be in more than one challenge at the same time, meaning that the protocol operates with [partial concurrency](https://medium.com/l2beat/fraud-proof-wars-b0cb4d0f452a). Since each challenge lasts 6d 8h, this implies that the protocol can be subject to [delay attacks](https://medium.com/offchainlabs/solutions-to-delay-attacks-on-rollups-434f9d05a07a), where a malicious actor can delay withdrawals as long as they are willing to pay the cost of losing their stakes. If the protocol is delayed attacked, the new stake requirement increases exponentially for each challenge period of delay. Challenges are played via a bisection game, where asserter and challenger play together to find the first instruction of disagreement. Such instruction is then executed onchain in the WASM OneStepProver contract to determine the winner, who then gets half of the stake of the loser. As said before, a state root is rejected only when no one left is staked on it. The protocol does not enforces valid bisections, meaning that actors can propose correct initial claim and then provide incorrect midpoints.",
"references": [
{
"title": "Fraud Proof Wars: Arbitrum Classic",
"url": "https://medium.com/l2beat/fraud-proof-wars-b0cb4d0f452a"
}
]
}
]
},
"stateValidationImage": "orbit",
"warning": "Fraud proof system is fully deployed but is not yet permissionless as it requires Validators to be whitelisted."
}
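The technology entry above describes the force-inclusion escape hatch twice (under "forceTransactions" and "sequencing"): a delayed message enqueued through the Inbox can be forced into the canonical ordering via the SequencerInbox's `forceInclusion` function once the hardcoded 3d window has elapsed without the sequencer including it. A minimal sketch of that eligibility check is below; the constant and helper names are hypothetical, and only the 3-day bound and the `forceInclusion` entry point come from the data above.

```ts
// Sketch only: decides whether a delayed message is old enough to be
// force-included, per the 3d bound stated in the project data above.
// FORCE_INCLUSION_DELAY_SECONDS and canForceInclude are invented names;
// the actual onchain call is `forceInclusion` on the SequencerInbox.
const FORCE_INCLUSION_DELAY_SECONDS = 3 * 24 * 60 * 60; // "hardcoded to be 3d"

function canForceInclude(delayedMessageTimestamp: number, now: number): boolean {
  // Eligible once the sequencer has failed to include it for the full window.
  return now >= delayedMessageTimestamp + FORCE_INCLUSION_DELAY_SECONDS;
}

// Example: a message enqueued at t0 becomes forceable exactly 259200 seconds later.
const t0 = 1_700_000_000;
console.log(canForceInclude(t0, t0 + FORCE_INCLUSION_DELAY_SECONDS)); // true
```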
+28 -28
+2 -2
{
"capability": "universal",
"daLayer": [
"DAC",
"Espresso"
"Espresso",
"DAC"
],
"hostChain": {
"id": "ethereum",
"slug": "ethereum",
"name": "Ethereum"
},
"layer": "layer2",
"proofSystem": {
"type": "Optimistic"
},
"purposes": [
"Universal"
],
"raas": "Caldera",
"reasonsForBeingOther": [
{
"label": "Closed proofs",
"shortDescription": "There are less than 5 external actors that can submit challenges",
"description": "Projects without a sufficiently decentralized set of challengers rely on few entities to safely update the state. A small set of challengers can collude with the proposer to finalize an invalid state, which can cause loss of funds."
},
{
"label": "Small DAC",
"shortDescription": "There are less than 5 external actors that can attest data availability",
"description": "Projects without a sufficiently decentralized data availability committee rely on few entities to safely attest data availability on Ethereum. A small set of entities can collude with the proposer to finalize an unavailable state, which can cause loss of funds."
}
],
"stacks": [
"Arbitrum"
],
"stage": "Not applicable",
"type": "Other",
"vm": [
"EVM"
]
}
+11 -11
[
{
"layer": {
"value": "DAC",
"value": "Espresso",
"sentiment": "warning",
"description": "The data is posted off chain and a Data Availability Committee (DAC) is responsible for protecting and supplying it."
"description": "The data is posted to Espresso.",
"projectId": "espresso"
},
"bridge": {
"value": "1/1 DAC Members",
"value": "None",
"sentiment": "bad",
"description": "There is a threshold of 1/1 members that must sign and attest that the data is correct and available.",
"orderHint": -0.01
"description": "There is no bridge that can attest if the data has been made available.",
"orderHint": -2
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
},
{
"layer": {
"value": "Espresso",
"value": "DAC",
"sentiment": "warning",
"description": "The data is posted to Espresso.",
"projectId": "espresso"
"description": "The data is posted off chain and a Data Availability Committee (DAC) is responsible for protecting and supplying it."
},
"bridge": {
"value": "None",
"value": "1/1 DAC Members",
"sentiment": "bad",
"description": "There is no bridge that can attest if the data has been made available.",
"orderHint": -2
"description": "There is a threshold of 1/1 members that must sign and attest that the data is correct and available.",
"orderHint": -0.01
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
}
]
+15 -15
{
"architectureImage": "orbit-optimium",
"dataAvailability": [
{
"name": "Data is not stored on chain",
"description": "Users transactions are not published onchain, but rather sent to external trusted parties, also known as committee members (DAC). Members of the DAC collectively produce a Data Availability Certificate (comprising BLS signatures from a quorum) guaranteeing that the data behind the new transaction batch will be available until the expiry period elapses (currently a minimum of two weeks). This signature is not verified by L1, however external Validators will skip the batch if BLS signature is not valid resulting. This will result in a fraud proof challenge if this batch is included in a consecutive state update. It is assumed that at least one honest DAC member that signed the batch will reveal tx data to the Validators if Sequencer decides to act maliciously and withhold the data. If the Sequencer cannot gather enough signatures from the DAC, it will \"fall back to rollup\" mode and by posting the full data directly to the L1 chain. The current DAC threshold is 1 out of 1.",
"name": "Data is posted to Espresso",
"description": "Transactions roots are posted onchain and the full data is posted on Espresso. Since the HotShot Light Client contract is not used, availability of the data is not verified against Espresso validators, meaning that the Sequencer can single-handedly publish unavailable roots.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the external data becomes unavailable.",
"text": "the sequencer posts an unavailable transaction root.",
"isCritical": true
},
{
"category": "Users can be censored if",
"text": "the committee restricts their access to the external data."
"category": "Funds can be lost if",
"text": "the data is not available on the external provider.",
"isCritical": true
}
],
"references": [
{
"title": "Inside AnyTrust - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/inside-anytrust"
"title": "Espresso Light Client",
"url": "https://docs.espressosys.com/network/learn/the-espresso-network/internal-functionality/light-client"
}
]
},
{
"name": "Data is posted to Espresso",
"description": "Transactions roots are posted onchain and the full data is posted on Espresso. Since the HotShot Light Client contract is not used, availability of the data is not verified against Espresso validators, meaning that the Sequencer can single-handedly publish unavailable roots.",
"name": "Data is not stored on chain",
"description": "Users transactions are not published onchain, but rather sent to external trusted parties, also known as committee members (DAC). Members of the DAC collectively produce a Data Availability Certificate (comprising BLS signatures from a quorum) guaranteeing that the data behind the new transaction batch will be available until the expiry period elapses (currently a minimum of two weeks). This signature is not verified by L1, however external Validators will skip the batch if BLS signature is not valid resulting. This will result in a fraud proof challenge if this batch is included in a consecutive state update. It is assumed that at least one honest DAC member that signed the batch will reveal tx data to the Validators if Sequencer decides to act maliciously and withhold the data. If the Sequencer cannot gather enough signatures from the DAC, it will \"fall back to rollup\" mode and by posting the full data directly to the L1 chain. The current DAC threshold is 1 out of 1.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the sequencer posts an unavailable transaction root.",
"text": "the external data becomes unavailable.",
"isCritical": true
},
{
"category": "Funds can be lost if",
"text": "the data is not available on the external provider.",
"isCritical": true
"category": "Users can be censored if",
"text": "the committee restricts their access to the external data."
}
],
"references": [
{
"title": "Espresso Light Client",
"url": "https://docs.espressosys.com/network/learn/the-espresso-network/internal-functionality/light-client"
"title": "Inside AnyTrust - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/inside-anytrust"
}
]
}
],
"exitMechanisms": [
{
"name": "Regular messaging",
"description": "The user initiates L2->L1 messages by submitting a regular transaction on this chain. When the block containing that transaction is settled, the message becomes available for processing on L1. The process of block finalization usually takes several days to complete.",
"risks": [],
"references": [
{
"title": "Transaction lifecycle - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/tx-lifecycle"
},
{
"title": "L2 to L1 Messages - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/deep-dives/l2-to-l1-messaging"
},
{
"title": "Mainnet for everyone - Arbitrum Blog",
"url": "https://offchain.medium.com/mainnet-for-everyone-27ce0f67c85e"
}
]
},
{
"name": "Autonomous exit",
"description": "Users can (eventually) exit the system by pushing the transaction on L1 and providing the corresponding state root. The only way to prevent such withdrawal is via an upgrade.",
"risks": [],
"references": []
}
],
"forceTransactions": {
"name": "Users can force any transaction",
"description": "Because the state of the system is based on transactions submitted on the underlying host chain and anyone can submit their transactions there it allows the users to circumvent censorship by interacting with the smart contract on the host chain directly. After a delay of 3d in which a Sequencer has failed to include a transaction that was directly posted to the smart contract, it can be forcefully included by anyone on the host chain, which finalizes its ordering.",
"risks": [],
"references": [
{
"title": "SequencerInbox.sol - source code, forceInclusion function",
"url": "https://etherscan.io/address/0x2C381da225148f7d6390f0EE4A162F958ec40e7A#code"
},
{
"title": "Sequencer Isn't Doing Its Job - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/sequencer#unhappyuncommon-case-sequencer-isnt-doing-its-job"
}
]
},
"operator": {
"name": "The system has a centralized sequencer",
"description": "While forcing transaction is open to anyone the system employs a privileged sequencer that has priority for submitting transaction batches and ordering transactions.",
"risks": [
{
"category": "MEV can be extracted if",
"text": "the operator exploits their centralized position and frontruns user transactions."
}
],
"references": [
{
"title": "Sequencer - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/inside-arbitrum-nitro#the-sequencer"
}
]
},
"otherConsiderations": [
{
"name": "EVM compatible smart contracts are supported",
"description": "Arbitrum One uses Nitro technology that allows running fraud proofs by executing EVM code on top of WASM.",
"risks": [],
"references": [
{
"title": "Inside Arbitrum Nitro",
"url": "https://developer.offchainlabs.com/inside-arbitrum-nitro/"
}
]
}
],
"sequencing": {
"name": "Espresso TEE sequencer",
"description": "Integration with Espresso sequencing. \n In addition to providing regular pre-confirmations, the sequencer publishes blocks to the Espresso Network.\n The integration expects the transaction batch poster to run inside a Trusted Execution Environment (TEE), and it is programmed to verify batch inclusion in a Espresso Network block before publishing it to the host chain.\n However, the confirmations provided by Espresso Network are additive, and the batch poster can skip Espresso inclusion checks should the Espresso Network be down or unavailable.\n To ensure the batch poster is running inside a TEE, the SequencerInbox contract on the host chain was updated so that the data posting function also includes a TEE attestation as input (a \"quote\" / signature) that is verified onchain by the EspressoTEEVerifier for each batch transaction. \n The verifier checks whether the signature originates from inside the TEE and reverts if unsuccessful.",
"references": [
{
"url": "https://github.com/EspressoSystems/nitro-espresso-integration/blob/7ddcc6c036fa05cc47560552c85f30b5adedf32c/arbnode/batch_poster.go#L574",
"title": "Nitro Espresso Integration"
},
{
"url": "https://gramine.readthedocs.io/en/stable/sgx-intro.html#:~:text=The%20SGX%20quote%20is%20a%20signed%20report%20that%20contains%20the%20enclave%20measurement%20and%20the%20signer%20measurement%20of%20the%20enclave%20and%20the%20signer%20of%20the%20signer%20process%20that%20created%20the%20report.",
"title": "SGX Quote"
}
],
"risks": [
{
"category": "Withdrawals can be delayed if",
"text": "the owner of EspressoTEEVerifier updates the contract verification values (enclave hash, signer) and it is no longer possible to verify the TEE quote."
}
]
},
"stateValidation": {
"description": "Updates to the system state can be proposed and challenged by a set of whitelisted validators. If a state root passes the challenge period, it is optimistically considered correct and made actionable for withdrawals.",
"categories": [
{
"title": "State root proposals",
"description": "Whitelisted validators propose state roots as children of a previous state root. A state root can have multiple conflicting children. This structure forms a graph, and therefore, in the contracts, state roots are referred to as nodes. Each proposal requires a stake, currently set to 0.1 ETH, that can be slashed if the proposal is proven incorrect via a fraud proof. Stakes can be moved from one node to one of its children, either by calling `stakeOnExistingNode` or `stakeOnNewNode`. New nodes cannot be created faster than the minimum assertion period by the same validator, currently set to 15m. The oldest unconfirmed node can be confirmed if the challenge period has passed and there are no siblings, and rejected if the parent is not a confirmed node or if the challenge period has passed and no one is staked on it.",
"risks": [
{
"category": "Funds can be stolen if",
"text": "none of the whitelisted verifiers checks the published state. Fraud proofs assume at least one honest and able validator.",
"isCritical": true
}
],
"references": [
{
"title": "How is fraud proven - Arbitrum documentation FAQ",
"url": "https://docs.arbitrum.io/get-started/arbitrum-introduction"
}
]
},
{
"title": "Challenges",
"description": "A challenge can be started between two siblings, i.e. two different state roots that share the same parent, by calling the `startChallenge` function. Validators cannot be in more than one challenge at the same time, meaning that the protocol operates with [partial concurrency](https://medium.com/l2beat/fraud-proof-wars-b0cb4d0f452a). Since each challenge lasts 6d 8h, this implies that the protocol can be subject to [delay attacks](https://medium.com/offchainlabs/solutions-to-delay-attacks-on-rollups-434f9d05a07a), where a malicious actor can delay withdrawals as long as they are willing to pay the cost of losing their stakes. If the protocol is delayed attacked, the new stake requirement increases exponentially for each challenge period of delay. Challenges are played via a bisection game, where asserter and challenger play together to find the first instruction of disagreement. Such instruction is then executed onchain in the WASM OneStepProver contract to determine the winner, who then gets half of the stake of the loser. As said before, a state root is rejected only when no one left is staked on it. The protocol does not enforces valid bisections, meaning that actors can propose correct initial claim and then provide incorrect midpoints.",
"references": [
{
"title": "Fraud Proof Wars: Arbitrum Classic",
"url": "https://medium.com/l2beat/fraud-proof-wars-b0cb4d0f452a"
}
]
}
]
},
"stateValidationImage": "orbit",
"warning": "Fraud proof system is fully deployed but is not yet permissionless as it requires Validators to be whitelisted."
}
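The "sequencing" entry above describes the batch poster attaching a TEE attestation ("quote") that the host-chain contract verifies before accepting a batch. A purely schematic sketch of that kind of check is below; it is not the EspressoTEEVerifier interface, and the key format and quote structure shown are assumptions made only for illustration.

```ts
// Schematic only: accept a batch only if the attached quote verifies as a
// signature over the batch bytes by the expected (registered) TEE signer.
// The real EspressoTEEVerifier performs this onchain and also checks enclave
// measurements; this sketch only shows the signature-check shape.
import { createVerify } from "node:crypto";

function isBatchAttested(
  batchData: Buffer,
  quoteSignature: Buffer,
  expectedTeeSignerPublicKeyPem: string // assumption: PEM-encoded key, for illustration
): boolean {
  const verifier = createVerify("sha256");
  verifier.update(batchData);
  verifier.end();
  // Mirrors the described behavior: the contract reverts when this check fails.
  return verifier.verify(expectedTeeSignerPublicKeyPem, quoteSignature);
}
```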
+10 -0
+10 -0
{
"consensusAlgorithm": {
"name": "CometBFT",
"description": "CometBFT is the canonical implementation of the Tendermint consensus algorithm.\n CometBFT allows for a state transition machine to be written in any programming language, and it allows for secure replication across many machines.\n The consensus protocol is fork-free by construction under an honest majority of stake assumption.",
"blockTime": 15,
"consensusFinality": 1,
"unbondingPeriod": 1213200
},
"dataAvailabilitySampling": {
"erasureCodingScheme": "2D Reed-Solomon",
"erasureCodingProof": "Fraud proofs"
},
"economicSecurity": {
"token": {
"symbol": "TIA",
"decimals": 6,
"coingeckoId": "celestia"
}
},
"finality": 6,
"pruningWindow": 608400,
"risks": {
"economicSecurity": {
"value": {
"value": "Staked assets",
"sentiment": "good",
"description": "There are staked assets on the DA layer that can be slashed in case of a data withholding attack. A dishonest supermajority of validators must collude to finalize a block with missing or invalid data. The invalid block would be added to the chain but rejected by honest full nodes.\n "
},
"adjustSecurityRisk": true
},
"fraudDetection": {
"value": "DAS",
"sentiment": "warning",
"description": "The DA layer uses data availability sampling (DAS) to protect against data withholding attacks. However, the block reconstruction protocol, which enables the minimum number of light nodes to collectively reconstruct the block, is still under development.",
"secondLine": ""
}
},
"sovereignProjectsTrackingConfig": [
{
"projectId": "battle-for-blockchain",
"name": "Battle for Blockchain",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5047670,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAKzFLTn1xOipecg="
}
]
},
{
"projectId": "camp",
"name": "Camp",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 6459709,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAAB7AAAAAAAAAeQ="
}
]
},
{
"projectId": "civitia",
"name": "Civitia",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 4492300,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAEwLOhV+kOUlUq4="
}
]
},
{
"projectId": "clique",
"name": "Clique",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 3161819,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAARQV7t6sd4A="
}
]
},
{
"projectId": "echelon",
"name": "Echelon",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5659637,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAItyY42OaTC/skE="
}
]
},
{
"projectId": "echos",
"name": "Echos",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 3161819,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAA/cZMN1XLG8="
}
]
},
{
"projectId": "embr-fun",
"name": "Embr.fun",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5954601,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAMbONmnisjTDz4I="
}
]
},
{
"projectId": "flame",
"name": "Flame",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 3161819,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAL2dxfeNrJ+tg6Y="
}
]
},
{
"projectId": "flynet",
"name": "Flynet",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5188001,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAAAACyPE7Ql9zwA="
}
]
},
{
"projectId": "forma",
"name": "Forma",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 3161819,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAKKnitomrCy/HoY="
}
]
},
{
"projectId": "foundation-network",
"name": "Foundation Network",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 3667737,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAN/xAxpagCrjLVQ="
}
]
},
{
"projectId": "hibachi",
"name": "Hibachi",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5979823,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAABoaWJhY2hpLXM="
},
{
"type": "celestia",
"sinceBlock": 5981133,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAGhpYmFjaGk="
}
]
},
{
"projectId": "ing",
"name": "ING",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5945453,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAALdhUSKgKHU0Zx0="
}
]
},
{
"projectId": "inertia",
"name": "Inertia",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5941532,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAEg2If2QGyq4ZRQ="
}
]
},
{
"projectId": "intergaze",
"name": "Intergaze",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5748411,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAIoZCJNLh1XvOZA="
}
]
},
{
"projectId": "milkyway",
"name": "Milkyway",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5298640,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAB2AZsEwLd1LLHk="
}
]
},
{
"projectId": "onchain",
"name": "Onchain",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 3161819,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAAAABNfLrOLSCTY="
}
]
},
{
"projectId": "perennial",
"name": "Perennial",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 3886561,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAaToZYrE0tA="
}
]
},
{
"projectId": "rave",
"name": "Rave",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5645296,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAF45zaUciayEPXE="
}
]
},
{
"projectId": "rena",
"name": "Rena",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5775045,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAMxohfftlR5t59s="
}
]
},
{
"projectId": "rivalz-network",
"name": "Rivalz Network",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 4932528,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAMJ/xGlNMdE="
}
]
},
{
"projectId": "xo-market",
"name": "XO Market",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 8164261,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAACnGTcXcpKRnenc="
},
{
"type": "celestia",
"sinceBlock": 8383370,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAEjr00EdZDGvoMU="
}
]
},
{
"projectId": "yominet",
"name": "Yominet",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5966190,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAFjh+OUc/ORU/0o="
}
]
},
{
"projectId": "zaar",
"name": "Zaar",
"daTrackingConfig": [
{
"type": "celestia",
"sinceBlock": 5587852,
"namespace": "AAAAAAAAAAAAAAAAAAAAAAAAAM8NBxiaOQwFwQc="
}
]
}
],
"systemCategory": "public",
"technology": {
"description": "\n ## Architecture\n\n \n\n ## Consensus\n Celestia uses CometBTF, the canonical implementation of Tendermint consensus protocol. The consensus protocol is fork-free by construction under an honest majority of stake assumption.\n Celestia achieves finality at each block, with an average time between blocks of 6 seconds.\n ## Blobs\n In Celestia, blobs are user-submitted data that do not modify the blockchain state. \n Each blob has two components, one is a binary object of raw data bytes, and the other is the namespace of the specific application for which the blob data is intended for.\n\n\n \n\n All data posted in a Celestia blob is divided into chunks of fixed size, called shares, and each blob is arranged in a k * k matrix of shares. The maximum original square size is 512 (k = 512), for a total of 262,144 shares.\n\n\n \n\n Celestia shares' rows and columns are erasure-coded into a 2k * 2k matrix and committed to in a Namespaced Merkle Trees (NMTs), a version of a standard Merkle tree using a namespaced hash function. \n In NMTs, every node in the tree includes the range of namespaces of all its child nodes, allowing applications to request and retrieve data for a specific namespace sub-tree while maintaining all functionalities (e.g., inclusion and range proofs) of a standard Merkle tree.\n\n\n \n\n Ultimately, a single data root (availableDataRoot) of the Merkle tree is computed with the row and column roots as leaves. This data root is included in the block header as the root of commitments to erasure-coded data so that individual shares in the matrix can be proven to belong to a single data root.\n\n\n \n\n ## Data Availability Sampling (DAS)\n\n To ensure data availability, Celestia light nodes perform sampling on the 2k x 2k data matrix. Each light node randomly selects a set of unique coordinates within the extended matrix and requests the corresponding data shares and Merkle proofs from full nodes.\n Currently, a Celestia light node must perform a minimum of 16 samples before declaring that a block is available.\n This sampling rate ensures that given the minimum number of unavailable shares, a light client will sample at least one unavailable share with a 99% probability.\n\n For more details on DAS probabilistic analysis, see the Fraud and Data Availability Proofs paper.\n\n\n \n\n ## Erasure Coding Proof\n Light nodes performing data availability sampling must have the guarantee that the sampled data is erasure coded correctly. In Celestia, light nodes can be notified of a maliciously encoded block through Bad Encoding Fraud Proofs (BEFPs). Full nodes receiving invalid erasure-coded data can generate a fraud-proof to be transmitted to all light and full nodes in the DA network. The proof is generated by full nodes reconstructing the original data from the block data, and verifying that the recomputed data root matches the data root of the block header. \n Upon receiving and verifying the BEFP, all Celestia nodes should halt providing services (e.g., submitTx).\n\n ## L2s Data Availability\n L2s can post data to Celestia by submitting blobs through a payForBlobs transaction. The transaction can include data as a single blob or multiple blobs, with the total maximum size determined by the maximum block size. The transaction fee is determined by the size of the data and the current gas price. \n Applications can then retrieve the data by querying the Celestia blockchain for the data root of the blob and the namespace of the application. 
The data can be reconstructed by querying the Celestia network for the shares of the data matrix and reconstructing the data using the erasure coding scheme.\n ",
"references": [
{
"title": "Celestia Specifications",
"url": "https://celestiaorg.github.io/celestia-app/data_structures.html"
},
{
"title": "Celestia Core - CometBFT",
"url": "https://github.com/celestiaorg/celestia-core"
},
{
"title": "Celestia Node - Data Retrieval",
"url": "https://github.com/celestiaorg/celestia-node/blob/9ff58570ef86e505b718abfc755fd18643a2284c/share/eds/retriever.go#L60"
},
{
"title": "Bad Encoding Fraud Proofs",
"url": "https://github.com/celestiaorg/celestia-node/blob/main/docs/adr/adr-006-fraud-service.md"
},
{
"title": "Fraud and Data Availability Proofs paper",
"url": "https://arxiv.org/pdf/1809.09044"
}
],
"risks": [
{
"category": "Funds can be lost if",
"text": "a dishonest supermajority of Celestia validators finalizes an unavailable block, and there aren't light nodes on the network verifying data availability, or they fail at social signaling unavailable data."
},
{
"category": "Funds can be lost if",
"text": "a dishonest supermajority of Celestia validators finalizes an unavailable block, and the light nodes on the network cannot collectively reconstruct the block."
}
]
},
"throughput": [
{
"size": 33554432,
"frequency": 6,
"sinceTimestamp": 1764802980
},
{
"size": 8388608,
"frequency": 6,
"sinceTimestamp": 1738022400
},
{
"size": 1974272,
"frequency": 6,
"sinceTimestamp": 1733961600
},
{
"size": 1974272,
"frequency": 12,
"sinceTimestamp": 1698710400
}
],
"type": "Public Blockchain",
"usedWithoutBridgeIn": [
{
"id": "ancient",
"name": "Ancient8",
"slug": "ancient8"
},
{
"id": "b3",
"name": "B3",
"slug": "b3"
},
{
"id": "lyra",
"name": "Derive",
"slug": "derive"
},
{
"id": "eclipse",
"name": "Eclipse",
"slug": "eclipse"
},
{
"id": "form",
"name": "Form",
"slug": "form"
},
{
"id": "lightlink",
"name": "LightLink",
"slug": "lightlink"
},
{
"id": "mantapacific",
"name": "Manta Pacific",
"slug": "mantapacific"
},
{
"id": "molten",
"name": "Molten Network",
"slug": "molten"
},
{
"id": "orderly",
"name": "Orderly Network",
"slug": "orderly"
},
{
"id": "rari",
"name": "RARI Chain",
"slug": "rari"
},
{
"id": "river",
"name": "Towns",
"slug": "towns"
}
],
"validators": {
"type": "dynamic"
}
}
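The Celestia entry above states that 16 samples give a light node a 99% chance of catching a withheld block. A quick back-of-the-envelope check of that figure, under the standard assumption from the referenced Fraud and Data Availability Proofs paper that making a block unreconstructable requires withholding at least about a quarter of the shares in the extended square:

```ts
// Back-of-the-envelope check of the "16 samples -> 99%" claim above.
// Assumption (for illustration): each independent random sample lands on an
// unavailable share with probability >= 0.25 when the block is unreconstructable.
function detectionProbability(samples: number, unavailableFraction = 0.25): number {
  // Probability that at least one sample hits a withheld share.
  return 1 - Math.pow(1 - unavailableFraction, samples);
}

console.log(detectionProbability(16).toFixed(4)); // ~0.9900
```

With `unavailableFraction = 0.25`, sixteen independent samples give 1 - 0.75^16 ≈ 0.99, matching the quoted probability.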
+65 -14
+10 -0
{
"badges": [
{
"id": "EVM",
"type": "VM",
"name": "EVM",
"description": "This project uses the Ethereum Virtual Machine to run its smart contracts and supports the Solidity programming language",
"action": {
"type": "scalingFilter",
"id": "vm",
"value": "EVM"
}
},
{
"id": "Celestia",
"type": "DA",
"name": "Celestia",
"description": "This project is posting its data to Celestia",
"action": {
"type": "publicDaHighlight",
"slug": "celestia"
}
},
{
"id": "DAC",
"type": "DA",
"name": "Data Availability Committee",
"description": "There is a Data Availability Committee that provides/attests to data availability",
"action": {
"type": "selfDaHighlight"
}
},
{
"id": "Espresso",
"type": "DA",
"name": "Espresso",
"description": "This project is posting its data to Espresso",
"action": {
"type": "publicDaHighlight",
"slug": "espresso"
}
},
{
"id": "Orbit",
"type": "Stack",
"name": "Built on Arbitrum Orbit",
"description": "The project is built on Arbitrum Orbit",
"action": {
"type": "scalingFilter",
"id": "stack",
"value": "Arbitrum"
}
},
{
"id": "Arbitrum",
"type": "L3ParentChain",
"name": "Built on top of Arbitrum",
"description": "The project has Arbitrum as its host chain",
"action": {
"type": "scalingFilter",
"id": "hostChain",
"value": "Arbitrum"
}
},
{
"id": "EspressoPreconfs",
"type": "Other",
"name": "Espresso Preconfs",
"description": "The project integrates with Espresso preconfirmations. The chain batch poster publishes blocks to Espresso Network and runs in a Trusted Execution Environment (TEE) programmed to verify that only Espresso-validated batches reach the host chain.",
"action": {
"type": "scalingFilter",
"id": "other",
"value": "Espresso Preconfs"
}
},
{
"id": "Caldera",
"type": "RaaS",
"name": "Caldera",
"description": "This project was deployed via the rollup-as-a-service provider Caldera",
"action": {
"type": "scalingFilter",
"id": "raas",
"value": "Caldera"
}
}
],
"description": "Molten is an Orbit stack L3 on Arbitrum with Celestia DA, created by the UniDex team.",
"links": {
"websites": [
"https://moltennetwork.com/"
],
"bridges": [
"https://molten.calderabridge.xyz/",
"https://leverage.unidex.exchange/"
],
"documentation": [
"https://docs.unidex.exchange/appchain/markdown"
],
"explorers": [
"https://molten.calderaexplorer.xyz"
],
"socialMedia": [
"https://x.com/moltenl3",
"https://discord.com/invite/YACsZnuqC9",
"https://mirror.xyz/unidexexchange.eth",
"https://t.me/unidexfinance"
]
}
}
+3 -2
{
"capability": "universal",
"daLayer": [
"DAC",
"Espresso"
"Celestia",
"Espresso",
"DAC"
],
"hostChain": {
"id": "arbitrum",
"slug": "arbitrum",
"name": "Arbitrum One"
},
"layer": "layer3",
"proofSystem": {
"type": "Optimistic"
},
"purposes": [
"Universal"
],
"raas": "Caldera",
"reasonsForBeingOther": [
{
"label": "Closed proofs",
"shortDescription": "There are less than 5 external actors that can submit challenges",
"description": "Projects without a sufficiently decentralized set of challengers rely on few entities to safely update the state. A small set of challengers can collude with the proposer to finalize an invalid state, which can cause loss of funds."
}
],
"stacks": [
"Arbitrum"
],
"stage": "Not applicable",
"type": "Other",
"vm": [
"EVM"
]
}
+23 -5
[
{
"layer": {
"value": "DAC",
"value": "Celestia",
"sentiment": "warning",
"description": "The data is posted off chain and a Data Availability Committee (DAC) is responsible for protecting and supplying it."
"description": "The data is posted to Celestia.",
"projectId": "celestia"
},
"bridge": {
"value": "1/1 DAC Members",
"value": "None",
"sentiment": "bad",
"description": "There is a threshold of 1/1 members that must sign and attest that the data is correct and available.",
"orderHint": -0.01
"description": "There is no bridge that can attest if the data has been made available.",
"orderHint": -2
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
},
{
"layer": {
"value": "Espresso",
"sentiment": "warning",
"description": "The data is posted to Espresso.",
"projectId": "espresso"
},
"bridge": {
"value": "None",
"sentiment": "bad",
"description": "There is no bridge that can attest if the data has been made available.",
"orderHint": -2
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
},
{
"layer": {
"value": "DAC",
"sentiment": "warning",
"description": "The data is posted off chain and a Data Availability Committee (DAC) is responsible for protecting and supplying it."
},
"bridge": {
"value": "1/1 DAC Members",
"sentiment": "bad",
"description": "There is a threshold of 1/1 members that must sign and attest that the data is correct and available.",
"orderHint": -0.01
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
}
]
+29 -7
{
"architectureImage": "orbit-optimium-blobstream-espresso",
"dataAvailability": [
{
"name": "Data is not stored on chain",
"description": "Users transactions are not published onchain, but rather sent to external trusted parties, also known as committee members (DAC). Members of the DAC collectively produce a Data Availability Certificate (comprising BLS signatures from a quorum) guaranteeing that the data behind the new transaction batch will be available until the expiry period elapses (currently a minimum of two weeks). This signature is not verified by L1, however external Validators will skip the batch if BLS signature is not valid resulting. This will result in a fraud proof challenge if this batch is included in a consecutive state update. It is assumed that at least one honest DAC member that signed the batch will reveal tx data to the Validators if Sequencer decides to act maliciously and withhold the data. If the Sequencer cannot gather enough signatures from the DAC, it will \"fall back to rollup\" mode and by posting the full data directly to the L1 chain. The current DAC threshold is 1 out of 1.",
"name": "Data is posted to Celestia",
"description": "Transactions roots are posted onchain and the full data is posted on Celestia. Since the Blobstream bridge is not used, availability of the data is not verified against Celestia validators, meaning that the Sequencer can single-handedly publish unavailable roots.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the external data becomes unavailable.",
"text": "the sequencer posts an unavailable transaction root.",
"isCritical": true
},
{
"category": "Users can be censored if",
"text": "the committee restricts their access to the external data."
"category": "Funds can be lost if",
"text": "the data is not available on the external provider.",
"isCritical": true
}
],
"references": [
{
"title": "Inside AnyTrust - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/inside-anytrust"
"title": "Introducing Blobstream: streaming modular DA to Ethereum",
"url": "https://blog.celestia.org/introducing-blobstream/"
}
]
},
{
"name": "Data is posted to Espresso",
"description": "Transactions roots are posted onchain and the full data is posted on Espresso. Since the HotShot Light Client contract is not used, availability of the data is not verified against Espresso validators, meaning that the Sequencer can single-handedly publish unavailable roots.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the sequencer posts an unavailable transaction root.",
"isCritical": true
},
{
"category": "Funds can be lost if",
"text": "the data is not available on the external provider.",
"isCritical": true
}
],
"references": [
{
"title": "Espresso Light Client",
"url": "https://docs.espressosys.com/network/learn/the-espresso-network/internal-functionality/light-client"
}
]
},
{
"name": "Data is not stored on chain",
"description": "Users transactions are not published onchain, but rather sent to external trusted parties, also known as committee members (DAC). Members of the DAC collectively produce a Data Availability Certificate (comprising BLS signatures from a quorum) guaranteeing that the data behind the new transaction batch will be available until the expiry period elapses (currently a minimum of two weeks). This signature is not verified by L1, however external Validators will skip the batch if BLS signature is not valid resulting. This will result in a fraud proof challenge if this batch is included in a consecutive state update. It is assumed that at least one honest DAC member that signed the batch will reveal tx data to the Validators if Sequencer decides to act maliciously and withhold the data. If the Sequencer cannot gather enough signatures from the DAC, it will \"fall back to rollup\" mode and by posting the full data directly to the L1 chain. The current DAC threshold is 1 out of 1.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the external data becomes unavailable.",
"isCritical": true
},
{
"category": "Users can be censored if",
"text": "the committee restricts their access to the external data."
}
],
"references": [
{
"title": "Inside AnyTrust - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/inside-anytrust"
}
]
}
],
"exitMechanisms": [
{
"name": "Regular messaging",
"description": "The user initiates L2->L1 messages by submitting a regular transaction on this chain. When the block containing that transaction is settled, the message becomes available for processing on L1. The process of block finalization usually takes several days to complete.",
"risks": [],
"references": [
{
"title": "Transaction lifecycle - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/tx-lifecycle"
},
{
"title": "L2 to L1 Messages - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/deep-dives/l2-to-l1-messaging"
},
{
"title": "Mainnet for everyone - Arbitrum Blog",
"url": "https://offchain.medium.com/mainnet-for-everyone-27ce0f67c85e"
}
]
},
{
"name": "Autonomous exit",
"description": "Users can (eventually) exit the system by pushing the transaction on L1 and providing the corresponding state root. The only way to prevent such withdrawal is via an upgrade.",
"risks": [],
"references": []
}
],
"forceTransactions": {
"name": "Users can force any transaction",
"description": "Because the state of the system is based on transactions submitted on the underlying host chain and anyone can submit their transactions there it allows the users to circumvent censorship by interacting with the smart contract on the host chain directly. After a delay of 1d in which a Sequencer has failed to include a transaction that was directly posted to the smart contract, it can be forcefully included by anyone on the host chain, which finalizes its ordering.",
"risks": [],
"references": [
{
"title": "SequencerInbox.sol - source code, forceInclusion function",
"url": "https://arbiscan.io/address/0x481863c96f949F5E13932ec2F65470C0CF83808d#code"
},
{
"title": "Sequencer Isn't Doing Its Job - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/sequencer#unhappyuncommon-case-sequencer-isnt-doing-its-job"
}
]
},
"operator": {
"name": "The system has a centralized sequencer",
"description": "While forcing transaction is open to anyone the system employs a privileged sequencer that has priority for submitting transaction batches and ordering transactions.",
"risks": [
{
"category": "MEV can be extracted if",
"text": "the operator exploits their centralized position and frontruns user transactions."
}
],
"references": [
{
"title": "Sequencer - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/inside-arbitrum-nitro#the-sequencer"
}
]
},
"otherConsiderations": [
{
"name": "EVM compatible smart contracts are supported",
"description": "Arbitrum One uses Nitro technology that allows running fraud proofs by executing EVM code on top of WASM.",
"risks": [],
"references": [
{
"title": "Inside Arbitrum Nitro",
"url": "https://developer.offchainlabs.com/inside-arbitrum-nitro/"
}
]
}
],
"sequencing": {
"name": "Espresso TEE sequencer",
"description": "Integration with Espresso sequencing. \n In addition to providing regular pre-confirmations, the sequencer publishes blocks to the Espresso Network.\n The integration expects the transaction batch poster to run inside a Trusted Execution Environment (TEE), and it is programmed to verify batch inclusion in a Espresso Network block before publishing it to the host chain.\n However, the confirmations provided by Espresso Network are additive, and the batch poster can skip Espresso inclusion checks should the Espresso Network be down or unavailable.\n To ensure the batch poster is running inside a TEE, the SequencerInbox contract on the host chain was updated so that the data posting function also includes a TEE attestation as input (a \"quote\" / signature) that is verified onchain by the EspressoTEEVerifier for each batch transaction. \n The verifier checks whether the signature originates from inside the TEE and reverts if unsuccessful.",
"references": [
{
"url": "https://github.com/EspressoSystems/nitro-espresso-integration/blob/7ddcc6c036fa05cc47560552c85f30b5adedf32c/arbnode/batch_poster.go#L574",
"title": "Nitro Espresso Integration"
},
{
"url": "https://gramine.readthedocs.io/en/stable/sgx-intro.html#:~:text=The%20SGX%20quote%20is%20a%20signed%20report%20that%20contains%20the%20enclave%20measurement%20and%20the%20signer%20measurement%20of%20the%20enclave%20and%20the%20signer%20of%20the%20signer%20process%20that%20created%20the%20report.",
"title": "SGX Quote"
}
],
"risks": [
{
"category": "Withdrawals can be delayed if",
"text": "the owner of EspressoTEEVerifier updates the contract verification values (enclave hash, signer) and it is no longer possible to verify the TEE quote."
}
]
},
"stateValidation": {
"description": "Updates to the system state can be proposed and challenged by a set of whitelisted validators. If a state root passes the challenge period, it is optimistically considered correct and made actionable for withdrawals.",
"categories": [
{
"title": "State root proposals",
"description": "Whitelisted validators propose state roots as children of a previous state root. A state root can have multiple conflicting children. This structure forms a graph, and therefore, in the contracts, state roots are referred to as nodes. Each proposal requires a stake, currently set to 0.1 ETH, that can be slashed if the proposal is proven incorrect via a fraud proof. Stakes can be moved from one node to one of its children, either by calling `stakeOnExistingNode` or `stakeOnNewNode`. New nodes cannot be created faster than the minimum assertion period by the same validator, currently set to 15m. The oldest unconfirmed node can be confirmed if the challenge period has passed and there are no siblings, and rejected if the parent is not a confirmed node or if the challenge period has passed and no one is staked on it.",
"risks": [
{
"category": "Funds can be stolen if",
"text": "none of the whitelisted verifiers checks the published state. Fraud proofs assume at least one honest and able validator.",
"isCritical": true
}
],
"references": [
{
"title": "How is fraud proven - Arbitrum documentation FAQ",
"url": "https://docs.arbitrum.io/get-started/arbitrum-introduction"
}
]
},
{
"title": "Challenges",
"description": "A challenge can be started between two siblings, i.e. two different state roots that share the same parent, by calling the `startChallenge` function. Validators cannot be in more than one challenge at the same time, meaning that the protocol operates with [partial concurrency](https://medium.com/l2beat/fraud-proof-wars-b0cb4d0f452a). Since each challenge lasts 6d 8h, this implies that the protocol can be subject to [delay attacks](https://medium.com/offchainlabs/solutions-to-delay-attacks-on-rollups-434f9d05a07a), where a malicious actor can delay withdrawals as long as they are willing to pay the cost of losing their stakes. If the protocol is delayed attacked, the new stake requirement increases exponentially for each challenge period of delay. Challenges are played via a bisection game, where asserter and challenger play together to find the first instruction of disagreement. Such instruction is then executed onchain in the WASM OneStepProver contract to determine the winner, who then gets half of the stake of the loser. As said before, a state root is rejected only when no one left is staked on it. The protocol does not enforces valid bisections, meaning that actors can propose correct initial claim and then provide incorrect midpoints.",
"references": [
{
"title": "Fraud Proof Wars: Arbitrum Classic",
"url": "https://medium.com/l2beat/fraud-proof-wars-b0cb4d0f452a"
}
]
}
]
},
"stateValidationImage": "orbit",
"warning": "Fraud proof system is fully deployed but is not yet permissionless as it requires Validators to be whitelisted."
}
+130 -15
+10 -0
{
"badges": [
{
"id": "EVM",
"type": "VM",
"name": "EVM",
"description": "This project uses the Ethereum Virtual Machine to run its smart contracts and supports the Solidity programming language",
"action": {
"type": "scalingFilter",
"id": "vm",
"value": "EVM"
}
},
{
"id": "Celestia",
"type": "DA",
"name": "Celestia",
"description": "This project is posting its data to Celestia",
"action": {
"type": "publicDaHighlight",
"slug": "celestia"
}
},
{
"id": "DAC",
"type": "DA",
"name": "Data Availability Committee",
"description": "There is a Data Availability Committee that provides/attests to data availability",
"action": {
"type": "selfDaHighlight"
}
},
{
"id": "Espresso",
"type": "DA",
"name": "Espresso",
"description": "This project is posting its data to Espresso",
"action": {
"type": "publicDaHighlight",
"slug": "espresso"
}
},
{
"id": "Orbit",
"type": "Stack",
"name": "Built on Arbitrum Orbit",
"description": "The project is built on Arbitrum Orbit",
"action": {
"type": "scalingFilter",
"id": "stack",
"value": "Arbitrum"
}
},
{
"id": "Arbitrum",
"type": "L3ParentChain",
"name": "Built on top of Arbitrum",
"description": "The project has Arbitrum as its host chain",
"action": {
"type": "scalingFilter",
"id": "hostChain",
"value": "Arbitrum"
}
},
{
"id": "EspressoPreconfs",
"type": "Other",
"name": "Espresso Preconfs",
"description": "The project integrates with Espresso preconfirmations. The chain batch poster publishes blocks to Espresso Network and runs in a Trusted Execution Environment (TEE) programmed to verify that only Espresso-validated batches reach the host chain.",
"action": {
"type": "scalingFilter",
"id": "other",
"value": "Espresso Preconfs"
}
},
{
"id": "Caldera",
"type": "RaaS",
"name": "Caldera",
"description": "This project was deployed via the rollup-as-a-service provider Caldera",
"action": {
"type": "scalingFilter",
"id": "raas",
"value": "Caldera"
}
}
],
"description": "RARI Chain embeds royalties on the node level to guarantee royalty payments. A secure, low-cost, decentralized Ethereum L3 blockchain powered by Arbitrum.",
"links": {
"websites": [
"https://rarichain.org/"
],
"bridges": [
"https://bridge.arbitrum.io/?destinationChain=rari-mainnet&sourceChain=arbitrum-one"
],
"documentation": [
"https://rari.docs.caldera.dev/"
],
"explorers": [
"https://mainnet.explorer.rarichain.org/"
],
"repositories": [
"https://github.com/OffchainLabs/nitro"
],
"socialMedia": [
"https://twitter.com/RariChain"
]
}
}
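
For reference, the badge entries in this file all follow the same shape. The types below are inferred from the JSON itself and are not the project's canonical type definitions:

```ts
// Badge shape inferred from the entries above; illustrative only.
type BadgeAction =
  | { type: 'scalingFilter'; id: string; value: string }
  | { type: 'publicDaHighlight'; slug: string }
  | { type: 'selfDaHighlight' };

interface Badge {
  id: string;
  type: 'VM' | 'DA' | 'Stack' | 'L3ParentChain' | 'RaaS' | 'Other';
  name: string;
  description: string;
  action: BadgeAction;
}

// Example: the Celestia badge highlights the public DA page instead of applying a filter.
const celestiaBadge: Badge = {
  id: 'Celestia',
  type: 'DA',
  name: 'Celestia',
  description: 'This project is posting its data to Celestia',
  action: { type: 'publicDaHighlight', slug: 'celestia' },
};
```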
+3 -2
{
"capability": "universal",
"daLayer": [
"DAC",
"Espresso"
"Celestia",
"Espresso",
"DAC"
],
"hostChain": {
"id": "arbitrum",
"slug": "arbitrum",
"name": "Arbitrum One"
},
"layer": "layer3",
"proofSystem": {
"type": "Optimistic"
},
"purposes": [
"Universal",
"NFT"
],
"raas": "Caldera",
"reasonsForBeingOther": [
{
"label": "Closed proofs",
"shortDescription": "There are less than 5 external actors that can submit challenges",
"description": "Projects without a sufficiently decentralized set of challengers rely on few entities to safely update the state. A small set of challengers can collude with the proposer to finalize an invalid state, which can cause loss of funds."
}
],
"stacks": [
"Arbitrum"
],
"stage": "Not applicable",
"type": "Other",
"vm": [
"EVM"
]
}
+23 -5
[
{
"layer": {
"value": "DAC",
"value": "Celestia",
"sentiment": "warning",
"description": "The data is posted off chain and a Data Availability Committee (DAC) is responsible for protecting and supplying it."
"description": "The data is posted to Celestia.",
"projectId": "celestia"
},
"bridge": {
"value": "1/1 DAC Members",
"value": "None",
"sentiment": "bad",
"description": "There is a threshold of 1/1 members that must sign and attest that the data is correct and available.",
"orderHint": -0.01
"description": "There is no bridge that can attest if the data has been made available.",
"orderHint": -2
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
},
{
"layer": {
"value": "Espresso",
"sentiment": "warning",
"description": "The data is posted to Espresso.",
"projectId": "espresso"
},
"bridge": {
"value": "None",
"sentiment": "bad",
"description": "There is no bridge that can attest if the data has been made available.",
"orderHint": -2
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
},
{
"layer": {
"value": "DAC",
"sentiment": "warning",
"description": "The data is posted off chain and a Data Availability Committee (DAC) is responsible for protecting and supplying it."
},
"bridge": {
"value": "1/1 DAC Members",
"sentiment": "bad",
"description": "There is a threshold of 1/1 members that must sign and attest that the data is correct and available.",
"orderHint": -0.01
},
"mode": {
"value": "Transaction data",
"secondLine": "Compressed"
}
}
]
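
The rows above pair each DA layer with its bridge attestation and carry an orderHint. The sketch below only illustrates the row shape and a plausible use of orderHint for ordering; the sort behavior is an assumption, not taken from the source:

```ts
// DA table row shape taken from the JSON above; the sort helper is an assumed
// use of orderHint, not confirmed by the source.
interface DaCell {
  value: string;
  sentiment?: 'good' | 'warning' | 'bad';
  description?: string;
  projectId?: string;
  secondLine?: string;
  orderHint?: number;
}

interface DaRow {
  layer: DaCell;
  bridge: DaCell;
  mode: DaCell;
}

// Hypothetical: more negative hints (e.g. "None" at -2) sort before milder ones
// (e.g. "1/1 DAC Members" at -0.01).
const sortRows = (rows: DaRow[]): DaRow[] =>
  [...rows].sort((a, b) => (a.bridge.orderHint ?? 0) - (b.bridge.orderHint ?? 0));
```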
+29 -7
{
"architectureImage": "orbit-optimium-blobstream-espresso",
"dataAvailability": [
{
"name": "Data is not stored on chain",
"description": "Users transactions are not published onchain, but rather sent to external trusted parties, also known as committee members (DAC). Members of the DAC collectively produce a Data Availability Certificate (comprising BLS signatures from a quorum) guaranteeing that the data behind the new transaction batch will be available until the expiry period elapses (currently a minimum of two weeks). This signature is not verified by L1, however external Validators will skip the batch if BLS signature is not valid resulting. This will result in a fraud proof challenge if this batch is included in a consecutive state update. It is assumed that at least one honest DAC member that signed the batch will reveal tx data to the Validators if Sequencer decides to act maliciously and withhold the data. If the Sequencer cannot gather enough signatures from the DAC, it will \"fall back to rollup\" mode and by posting the full data directly to the L1 chain. The current DAC threshold is 1 out of 1.",
"name": "Data is posted to Celestia",
"description": "Transactions roots are posted onchain and the full data is posted on Celestia. Since the Blobstream bridge is not used, availability of the data is not verified against Celestia validators, meaning that the Sequencer can single-handedly publish unavailable roots.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the external data becomes unavailable.",
"text": "the sequencer posts an unavailable transaction root.",
"isCritical": true
},
{
"category": "Users can be censored if",
"text": "the committee restricts their access to the external data."
"category": "Funds can be lost if",
"text": "the data is not available on the external provider.",
"isCritical": true
}
],
"references": [
{
"title": "Inside AnyTrust - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/inside-anytrust"
"title": "Introducing Blobstream: streaming modular DA to Ethereum",
"url": "https://blog.celestia.org/introducing-blobstream/"
}
]
},
{
"name": "Data is posted to Espresso",
"description": "Transactions roots are posted onchain and the full data is posted on Espresso. Since the HotShot Light Client contract is not used, availability of the data is not verified against Espresso validators, meaning that the Sequencer can single-handedly publish unavailable roots.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the sequencer posts an unavailable transaction root.",
"isCritical": true
},
{
"category": "Funds can be lost if",
"text": "the data is not available on the external provider.",
"isCritical": true
}
],
"references": [
{
"title": "Espresso Light Client",
"url": "https://docs.espressosys.com/network/learn/the-espresso-network/internal-functionality/light-client"
}
]
},
{
"name": "Data is not stored on chain",
"description": "Users transactions are not published onchain, but rather sent to external trusted parties, also known as committee members (DAC). Members of the DAC collectively produce a Data Availability Certificate (comprising BLS signatures from a quorum) guaranteeing that the data behind the new transaction batch will be available until the expiry period elapses (currently a minimum of two weeks). This signature is not verified by L1, however external Validators will skip the batch if BLS signature is not valid resulting. This will result in a fraud proof challenge if this batch is included in a consecutive state update. It is assumed that at least one honest DAC member that signed the batch will reveal tx data to the Validators if Sequencer decides to act maliciously and withhold the data. If the Sequencer cannot gather enough signatures from the DAC, it will \"fall back to rollup\" mode and by posting the full data directly to the L1 chain. The current DAC threshold is 1 out of 1.",
"risks": [
{
"category": "Funds can be lost if",
"text": "the external data becomes unavailable.",
"isCritical": true
},
{
"category": "Users can be censored if",
"text": "the committee restricts their access to the external data."
}
],
"references": [
{
"title": "Inside AnyTrust - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/inside-anytrust"
}
]
}
],
"exitMechanisms": [
{
"name": "Regular messaging",
"description": "The user initiates L2->L1 messages by submitting a regular transaction on this chain. When the block containing that transaction is settled, the message becomes available for processing on L1. The process of block finalization usually takes several days to complete.",
"risks": [],
"references": [
{
"title": "Transaction lifecycle - Arbitrum documentation",
"url": "https://developer.offchainlabs.com/tx-lifecycle"
},
{
"title": "L2 to L1 Messages - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/deep-dives/l2-to-l1-messaging"
},
{
"title": "Mainnet for everyone - Arbitrum Blog",
"url": "https://offchain.medium.com/mainnet-for-everyone-27ce0f67c85e"
}
]
},
{
"name": "Autonomous exit",
"description": "Users can (eventually) exit the system by pushing the transaction on L1 and providing the corresponding state root. The only way to prevent such withdrawal is via an upgrade.",
"risks": [],
"references": []
}
],
"forceTransactions": {
"name": "Users can force any transaction",
"description": "Because the state of the system is based on transactions submitted on the underlying host chain and anyone can submit their transactions there it allows the users to circumvent censorship by interacting with the smart contract on the host chain directly. After a delay of 1d in which a Sequencer has failed to include a transaction that was directly posted to the smart contract, it can be forcefully included by anyone on the host chain, which finalizes its ordering.",
"risks": [],
"references": [
{
"title": "SequencerInbox.sol - source code, forceInclusion function",
"url": "https://arbiscan.io/address/0xF39c8d67B55Fef4851f9267304aA1A030E0DecAC#code"
},
{
"title": "Sequencer Isn't Doing Its Job - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/sequencer#unhappyuncommon-case-sequencer-isnt-doing-its-job"
}
]
},
"operator": {
"name": "The system has a centralized sequencer",
"description": "While forcing transaction is open to anyone the system employs a privileged sequencer that has priority for submitting transaction batches and ordering transactions.",
"risks": [
{
"category": "MEV can be extracted if",
"text": "the operator exploits their centralized position and frontruns user transactions."
}
],
"references": [
{
"title": "Sequencer - Arbitrum documentation",
"url": "https://docs.arbitrum.io/how-arbitrum-works/inside-arbitrum-nitro#the-sequencer"
}
]
},
"otherConsiderations": [
{
"name": "EVM compatible smart contracts are supported",
"description": "Arbitrum One uses Nitro technology that allows running fraud proofs by executing EVM code on top of WASM.",
"risks": [],
"references": [
{
"title": "Inside Arbitrum Nitro",
"url": "https://developer.offchainlabs.com/inside-arbitrum-nitro/"
}
]
}
],
"sequencing": {
"name": "Espresso TEE sequencer",
"description": "Integration with Espresso sequencing. \n In addition to providing regular pre-confirmations, the sequencer publishes blocks to the Espresso Network.\n The integration expects the transaction batch poster to run inside a Trusted Execution Environment (TEE), and it is programmed to verify batch inclusion in a Espresso Network block before publishing it to the host chain.\n However, the confirmations provided by Espresso Network are additive, and the batch poster can skip Espresso inclusion checks should the Espresso Network be down or unavailable.\n To ensure the batch poster is running inside a TEE, the SequencerInbox contract on the host chain was updated so that the data posting function also includes a TEE attestation as input (a \"quote\" / signature) that is verified onchain by the EspressoTEEVerifier for each batch transaction. \n The verifier checks whether the signature originates from inside the TEE and reverts if unsuccessful.",
"references": [
{
"url": "https://github.com/EspressoSystems/nitro-espresso-integration/blob/7ddcc6c036fa05cc47560552c85f30b5adedf32c/arbnode/batch_poster.go#L574",
"title": "Nitro Espresso Integration"
},
{
"url": "https://gramine.readthedocs.io/en/stable/sgx-intro.html#:~:text=The%20SGX%20quote%20is%20a%20signed%20report%20that%20contains%20the%20enclave%20measurement%20and%20the%20signer%20measurement%20of%20the%20enclave%20and%20the%20signer%20of%20the%20signer%20process%20that%20created%20the%20report.",
"title": "SGX Quote"
}
],
"risks": [
{
"category": "Withdrawals can be delayed if",
"text": "the owner of EspressoTEEVerifier updates the contract verification values (enclave hash, signer) and it is no longer possible to verify the TEE quote."
}
]
},
"stateValidation": {
"description": "Updates to the system state can be proposed and challenged by a set of whitelisted validators. If a state root passes the challenge period, it is optimistically considered correct and made actionable for withdrawals.",
"categories": [
{
"title": "State root proposals",
"description": "Whitelisted validators propose state roots as children of a previous state root. A state root can have multiple conflicting children. This structure forms a graph, and therefore, in the contracts, state roots are referred to as nodes. Each proposal requires a stake, currently set to 0.1 ETH, that can be slashed if the proposal is proven incorrect via a fraud proof. Stakes can be moved from one node to one of its children, either by calling `stakeOnExistingNode` or `stakeOnNewNode`. New nodes cannot be created faster than the minimum assertion period by the same validator, currently set to 15m. The oldest unconfirmed node can be confirmed if the challenge period has passed and there are no siblings, and rejected if the parent is not a confirmed node or if the challenge period has passed and no one is staked on it.",
"risks": [
{
"category": "Funds can be stolen if",
"text": "none of the whitelisted verifiers checks the published state. Fraud proofs assume at least one honest and able validator.",
"isCritical": true
}
],
"references": [
{
"title": "How is fraud proven - Arbitrum documentation FAQ",
"url": "https://docs.arbitrum.io/get-started/arbitrum-introduction"
}
]
},
{
"title": "Challenges",
"description": "A challenge can be started between two siblings, i.e. two different state roots that share the same parent, by calling the `startChallenge` function. Validators cannot be in more than one challenge at the same time, meaning that the protocol operates with [partial concurrency](https://medium.com/l2beat/fraud-proof-wars-b0cb4d0f452a). Since each challenge lasts 6d 8h, this implies that the protocol can be subject to [delay attacks](https://medium.com/offchainlabs/solutions-to-delay-attacks-on-rollups-434f9d05a07a), where a malicious actor can delay withdrawals as long as they are willing to pay the cost of losing their stakes. If the protocol is delayed attacked, the new stake requirement increases exponentially for each challenge period of delay. Challenges are played via a bisection game, where asserter and challenger play together to find the first instruction of disagreement. Such instruction is then executed onchain in the WASM OneStepProver contract to determine the winner, who then gets half of the stake of the loser. As said before, a state root is rejected only when no one left is staked on it. The protocol does not enforces valid bisections, meaning that actors can propose correct initial claim and then provide incorrect midpoints.",
"references": [
{
"title": "Fraud Proof Wars: Arbitrum Classic",
"url": "https://medium.com/l2beat/fraud-proof-wars-b0cb4d0f452a"
}
]
}
]
},
"stateValidationImage": "orbit",
"warning": "Fraud proof system is fully deployed but is not yet permissionless as it requires Validators to be whitelisted."
}
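
The "sequencing" entry above describes the Espresso TEE integration: the batch poster checks Espresso inclusion before posting (unless the Espresso Network is unavailable), and the host chain's SequencerInbox verifies the TEE quote via EspressoTEEVerifier. A hedged TypeScript sketch of that flow, with hypothetical interfaces (the real logic lives in the nitro-espresso-integration batch poster and the onchain verifier):

```ts
// Sketch of the batch-poster flow from the "sequencing" section above.
// EspressoClient and SequencerInbox are hypothetical interfaces, not real APIs.
interface EspressoClient {
  isAvailable(): Promise<boolean>;
  containsBatch(batchHash: string): Promise<boolean>;
}

interface SequencerInbox {
  // Onchain, EspressoTEEVerifier checks the TEE quote (enclave hash, signer)
  // and the call reverts if it does not verify.
  addBatch(batchData: Uint8Array, teeQuote: Uint8Array): Promise<void>;
}

async function postBatch(
  batchData: Uint8Array,
  batchHash: string,
  teeQuote: Uint8Array,
  espresso: EspressoClient,
  inbox: SequencerInbox,
): Promise<void> {
  // Espresso confirmations are additive: if the network is down, the poster
  // may skip the inclusion check rather than halt batch posting.
  if (await espresso.isAvailable()) {
    const included = await espresso.containsBatch(batchHash);
    if (!included) {
      throw new Error('batch not yet included in an Espresso Network block');
    }
  }
  await inbox.addBatch(batchData, teeQuote);
}
```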
+65 -1
null
{
"dac": {
"membersCount": 1,
"requiredMembers": 1
},
"description": "Set of parties responsible for signing and attesting to the availability of data.",
"fallback": {
"value": "Ethereum",
"secondLine": "Calldata",
"sentiment": "good",
"description": "The data is posted to Ethereum as calldata.",
"projectId": "ethereum"
},
"risks": {
"committeeSecurity": {
"value": "1/1",
"sentiment": "bad",
"description": "The committee does not meet basic security standards, either due to insufficient size, lack of member diversity, or poorly defined threshold parameters. The system lacks an effective DA bridge and it is reliant on the assumption of an honest sequencer, creating significant risks to data integrity and availability.",
"orderHint": -0.01
},
"upgradeability": {
"value": "No delay",
"sentiment": "bad",
"description": "There is no delay in the upgradeability of the bridge. Users have no time to exit the system before the bridge implementation update is completed."
},
"relayerFailure": {
"value": "No mechanism",
"sentiment": "bad",
"description": "The relayer role is permissioned, and the DA bridge does not have a Security Council or a governance mechanism to propose new relayers. In case of relayer failure, the DA bridge will halt and be unable to recover without the intervention of a centralized entity."
},
"economicSecurity": {
"value": {
"value": "None",
"sentiment": "bad",
"description": "There are no onchain assets at risk of being slashed in case of a data withholding attack, and the committee members are not publicly known."
},
"adjustSecurityRisk": false
},
"fraudDetection": {
"value": "None",
"sentiment": "bad",
"description": "There is no fraud detection mechanism in place. A data withholding attack can only be detected by nodes downloading the full data from the DA layer."
}
},
"technology": {
"description": "\n## Architecture\n\n\nThe DAC uses a data availability solution built on the AnyTrust protocol. It is composed of the following components:\n- **Sequencer Inbox**: Main entry point for the Sequencer submitting transaction batches.\n- **Data Availability Committee (DAC)**: A group of members responsible for storing and providing data on demand.\n- **Data Availability Certificate (DACert)**: A commitment ensuring that data blobs are available without needing full data posting on the L1 chain. \n\n\nCommittee members run servers that support APIs for storing and retrieving data blobs. \nThe Sequencer API allows the rollup Sequencer to submit data blobs for storage, while the REST API enables anyone to fetch data by hash. \nWhen the Sequencer produces a data batch, it sends the batch along with an expiration time to Committee members, who store it and sign it. \nOnce enough signatures are collected, the Sequencer aggregates them into a valid DACert and posts it to the L1 chain inbox. \nIf the Sequencer fails to collect enough signatures, it falls back to posting the full data to the L1 chain as calldata. \n\n\nA DACert includes a hash of the data block, an expiration time, and proof that the required threshold of Committee members have signed off on the data. \nThe proof consists of a hash of the Keyset used in signing, a bitmap indicating which members signed, and a BLS aggregated signature. \nL2 nodes reading from the sequencer inbox verify the certificate’s validity by checking the number of signers, the aggregated signature, and that the expiration time is at least two weeks ahead of the L2 timestamp. \nIf the DACert is valid, it provides a proof that the corresponding data is available from honest committee members.\n\n## DA Bridge Architecture\n\n\nThe DA commitments are posted to the destination chain through the sequencer inbox, using the inbox as a DA bridge.\nThe DA commitment consists of Data Availability Certificate (DACert), including a hash of the data block, an expiration time, and a proof that the required threshold of Committee members have signed off on the data.\nThe sequencer distributes the data and collects signatures from Committee members offchain. Only the DACert is posted by the sequencer to the destination chain inbox (the DA bridge), achieving destination chain transaction ordering finality in a single onchain transaction.\n ",
"risks": [
{
"category": "Funds can be lost if",
"text": "a malicious committee attests to an invalid data availability certificate."
},
{
"category": "Funds can be lost if",
"text": "the bridge contract or its dependencies receive a malicious code upgrade. There is no delay on code upgrades."
}
],
"references": [
{
"title": "Inside AnyTrust - Arbitrum Docs",
"url": "https://docs.arbitrum.io/how-arbitrum-works/inside-anytrust"
}
]
},
"type": "Data Availability Committee"
}
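
The technology section above lists what a DACert must prove: enough committee signatures (per the keyset bitmap), a valid BLS aggregate signature, and an expiry at least two weeks ahead of the L2 timestamp. A minimal TypeScript sketch of those checks, with hypothetical types and an assumed BLS verification helper:

```ts
// DACert validity checks paraphrased from the AnyTrust description above.
// Types and verifyBlsAggregate are placeholders, not the actual Nitro structures.
interface DACert {
  dataHash: string;            // hash of the data blob
  expiryTimestamp: number;     // unix seconds
  keysetHash: string;          // identifies the committee keyset used for signing
  signersBitmap: boolean[];    // which committee members signed
  aggregatedSignature: string; // BLS aggregate signature over the certificate
}

const TWO_WEEKS_SECONDS = 14 * 24 * 60 * 60;

// Assumed helper: verifies the BLS aggregate against the signers' public keys.
declare function verifyBlsAggregate(cert: DACert, committeePubKeys: string[]): boolean;

function isDACertValid(
  cert: DACert,
  committeePubKeys: string[],
  requiredSigners: number,
  l2Timestamp: number,
): boolean {
  const signerCount = cert.signersBitmap.filter(Boolean).length;
  if (signerCount < requiredSigners) return false;
  if (cert.expiryTimestamp < l2Timestamp + TWO_WEEKS_SECONDS) return false;
  return verifyBlsAggregate(cert, committeePubKeys);
}
```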