import: read from era files (#2254)
This PR extends the `nimbus import` command to also read from era files. The
command can now create a new database, or top up an existing one, with data
coming from era files instead of network sync.

* add `--era1-dir` and `--max-blocks` options to the command line
* make `persistBlocks` report basic stats like transactions and gas
* improve error reporting in several APIs
* allow importing multiple RLP files in one go
* clean up logging options to match nimbus-eth2
* make sure database is closed properly on shutdown
arnetheduck committed May 31, 2024
1 parent 324610e commit a375720
Showing 28 changed files with 296 additions and 159 deletions.
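
As a rough illustration of the new options (not code from this commit): `--max-blocks` caps how many blocks are imported and `--chunk-size` sets how many blocks go into one database transaction. The sketch below invents `importChunked`, `totalAvailable` and the log output purely for demonstration.

```nim
# Hypothetical sketch only: blocks are processed in batches of `chunkSize`,
# each batch standing in for one database transaction, stopping at `maxBlocks`.
proc importChunked(totalAvailable, maxBlocks, chunkSize: uint64) =
  let target = min(totalAvailable, maxBlocks)
  var imported = 0'u64
  while imported < target:
    let batch = min(chunkSize, target - imported)
    # in the real command a batch would be decoded from an era1 file and
    # handed to persistBlocks inside a single database transaction
    imported += batch
    echo "persisted ", batch, " blocks (", imported, "/", target, ")"

when isMainModule:
  importChunked(totalAvailable = 20_000'u64, maxBlocks = 10_000'u64,
                chunkSize = 8192'u64)
```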
4 changes: 2 additions & 2 deletions hive_integration/nodocker/engine/node.nim
@@ -86,8 +86,7 @@ proc processBlock(
ValidationResult.OK

proc getVmState(c: ChainRef, header: BlockHeader):
Result[BaseVMState, void]
{.gcsafe, raises: [CatchableError].} =
Result[BaseVMState, void] =
if c.vmState.isNil.not:
return ok(c.vmState)

@@ -96,6 +95,7 @@ proc getVmState(c: ChainRef, header: BlockHeader):
debug "Cannot initialise VmState",
number = header.blockNumber
return err()

return ok(vmState)

# A stripped down version of persistBlocks without validation
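
The new `getVmState` signature drops the exception annotation in favour of `Result[BaseVMState, void]`. As a standalone illustration of a `void` error type, assuming the `results` package used elsewhere in the codebase (the `firstEven` example is invented):

```nim
import results

# Toy example, not code from the PR: a Result with a `void` error type can
# only say "it failed", which matches how getVmState reports its failure.
proc firstEven(xs: seq[int]): Result[int, void] =
  for x in xs:
    if x mod 2 == 0:
      return ok(x)
  err()

when isMainModule:
  echo firstEven(@[1, 3, 4]).isOk   # true
  echo firstEven(@[1, 3, 5]).isOk   # false
```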
13 changes: 6 additions & 7 deletions nimbus/beacon/api_handler/api_forkchoice.nim
@@ -118,26 +118,25 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
return simpleFCU(PayloadExecutionStatus.invalid, "TDs unavailable for TDD check")

if td < ttd or (blockNumber > 0'u64 and ptd > ttd):
error "Refusing beacon update to pre-merge",
notice "Refusing beacon update to pre-merge",
number = blockNumber,
hash = blockHash.short,
diff = header.difficulty,
ptd = ptd,
ttd = ttd

return invalidFCU()
return invalidFCU("Refusing beacon update to pre-merge")

# If the head block is already in our canonical chain, the beacon client is
# probably resyncing. Ignore the update.
var canonHash: common.Hash256
if db.getBlockHash(header.blockNumber, canonHash) and canonHash == blockHash:
# TODO should this be possible?
# If we allow these types of reorgs, we will do lots and lots of reorgs during sync
debug "Reorg to previous block"
if chain.setCanonical(header) != ValidationResult.OK:
return invalidFCU(com, header)
elif chain.setCanonical(header) != ValidationResult.OK:
return invalidFCU(com, header)
notice "Reorg to previous block", blockHash

chain.setCanonical(header).isOkOr:
return invalidFCU(error, com, header)

# If the beacon client also advertised a finalized block, mark the local
# chain final and completely in PoS mode.
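
The tail of this hunk switches to `chain.setCanonical(header).isOkOr: ...`, forwarding the failure reason into `invalidFCU`. A small standalone sketch of the `isOkOr` pattern from the `results` package (the `setCanonicalStub` and `handle` names are invented):

```nim
import results

proc setCanonicalStub(succeed: bool): Result[void, string] =
  if succeed:
    return ok()
  err("unknown ancestor")

proc handle(succeed: bool): string =
  # isOkOr runs its body only on failure; inside the body the error payload is
  # injected as `error`, which is how the updated code hands a validation
  # message to invalidFCU.
  setCanonicalStub(succeed).isOkOr:
    return "invalid: " & error
  "ok"

when isMainModule:
  echo handle(true)    # ok
  echo handle(false)   # invalid: unknown ancestor
```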
4 changes: 2 additions & 2 deletions nimbus/beacon/api_handler/api_newpayload.nim
@@ -187,10 +187,10 @@ proc newPayload*(ben: BeaconEngineRef,
hash = blockHash, number = header.blockNumber
let body = blockBody(payload)
let vres = ben.chain.insertBlockWithoutSetHead(header, body)
if vres != ValidationResult.OK:
if vres.isErr:
ben.setInvalidAncestor(header, blockHash)
let blockHash = latestValidHash(db, parent, ttd)
return invalidStatus(blockHash, "Failed to insert block")
return invalidStatus(blockHash, vres.error())

# We've accepted a valid payload from the beacon client. Mark the local
# chain transitions to notify other subsystems (e.g. downloader) of the
24 changes: 15 additions & 9 deletions nimbus/beacon/api_handler/api_utils.nim
@@ -85,11 +85,14 @@ proc simpleFCU*(status: PayloadExecutionStatus,
)
)

proc invalidFCU*(hash = common.Hash256()): ForkchoiceUpdatedResponse =
proc invalidFCU*(
validationError: string,
hash = common.Hash256()): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(payloadStatus:
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(hash)
latestValidHash: toValidHash(hash),
validationError: some validationError
)
)

@@ -183,13 +186,16 @@ proc latestValidHash*(db: CoreDbRef,
# latestValidHash MUST be set to ZERO
common.Hash256()

proc invalidFCU*(com: CommonRef,
header: common.BlockHeader): ForkchoiceUpdatedResponse
{.gcsafe, raises: [RlpError].} =
proc invalidFCU*(validationError: string,
com: CommonRef,
header: common.BlockHeader): ForkchoiceUpdatedResponse =
var parent: common.BlockHeader
if not com.db.getBlockHeader(header.parentHash, parent):
return invalidFCU(common.Hash256())
return invalidFCU(validationError)

let blockHash = latestValidHash(com.db, parent,
com.ttd.get(high(common.BlockNumber)))
invalidFCU(blockHash)
let blockHash = try:
latestValidHash(com.db, parent, com.ttd.get(high(common.BlockNumber)))
except RlpError:
default(common.Hash256)

invalidFCU(validationError, blockHash)
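
For reference, a toy version of the try/except-as-expression pattern used in the new `invalidFCU` (the names `riskyLookup` and `lookupOrDefault` are invented; only the construct is the same): the expression yields a value on both paths, so an RlpError-style exception no longer has to escape the proc.

```nim
proc riskyLookup(fail: bool): int =
  if fail:
    raise newException(ValueError, "lookup failed")
  42

proc lookupOrDefault(fail: bool): int =
  # try/except used as an expression: both branches produce an int
  try:
    riskyLookup(fail)
  except ValueError:
    default(int)

when isMainModule:
  echo lookupOrDefault(false)   # 42
  echo lookupOrDefault(true)    # 0
```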
2 changes: 1 addition & 1 deletion nimbus/common/common.nim
@@ -369,7 +369,7 @@ proc initializeEmptyDb*(com: CommonRef)
{.gcsafe, raises: [CatchableError].} =
let kvt = com.db.kvt()
if canonicalHeadHashKey().toOpenArray notin kvt:
trace "Writing genesis to DB"
info "Writing genesis to DB"
doAssert(com.genesisHeader.blockNumber.isZero,
"can't commit genesis block with number > 0")
discard com.db.persistHeaderToDb(com.genesisHeader,
26 changes: 21 additions & 5 deletions nimbus/config.nim
@@ -145,6 +145,11 @@ type
abbr: "d"
name: "data-dir" }: OutDir

era1DirOpt* {.
desc: "Directory where era1 (pre-merge) archive can be found"
defaultValueDesc: "<data-dir>/era1"
name: "era1-dir" }: Option[OutDir]

keyStore* {.
desc: "Load one or more keystore files from this directory"
defaultValue: defaultKeystoreDir()
@@ -166,7 +171,7 @@ type
syncMode* {.
desc: "Specify particular blockchain sync mode."
longDesc:
"- default -- legacy sync mode\n" &
"- default -- beacon sync mode\n" &
"- full -- full blockchain archive\n" &
# "- snap -- experimental snap mode (development only)\n" &
""
@@ -475,12 +480,20 @@ type
name: "trusted-setup-file" .}: Option[string]

of `import`:

blocksFile* {.
argument
desc: "Import RLP encoded block(s) from a file, validate, write to database and quit"
defaultValue: ""
name: "blocks-file" }: InputFile
desc: "One or more RLP encoded block(s) files"
name: "blocks-file" }: seq[InputFile]

maxBlocks* {.
desc: "Maximum number of blocks to import"
defaultValue: uint64.high()
name: "max-blocks" .}: uint64

chunkSize* {.
desc: "Number of blocks per database transaction"
defaultValue: 8192
name: "chunk-size" .}: uint64

func parseCmdArg(T: type NetworkId, p: string): T
{.gcsafe, raises: [ValueError].} =
@@ -735,6 +748,9 @@ func httpServerEnabled*(conf: NimbusConf): bool =
conf.wsEnabled or
conf.rpcEnabled

func era1Dir*(conf: NimbusConf): OutDir =
conf.era1DirOpt.get(OutDir(conf.dataDir.string & "/era1"))

# KLUDGE: The `load()` template does currently not work within any exception
# annotated environment.
{.pop.}
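
The `era1Dir` accessor added above resolves the optional flag against the data directory, defaulting to `<data-dir>/era1`. A standalone equivalent, using plain strings and `std/options` in place of `OutDir` and the config's option type (both substitutions are just for the sketch):

```nim
import std/options

# Sketch of the defaulting rule: when --era1-dir is not supplied, the
# directory falls back to <data-dir>/era1.
proc era1Dir(dataDir: string, era1DirOpt: Option[string]): string =
  era1DirOpt.get(dataDir & "/era1")

when isMainModule:
  echo era1Dir("/var/lib/nimbus", none(string))       # /var/lib/nimbus/era1
  echo era1Dir("/var/lib/nimbus", some("/mnt/era1"))  # /mnt/era1
```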
13 changes: 6 additions & 7 deletions nimbus/core/block_import.nim
@@ -7,6 +7,8 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import
chronicles,
eth/rlp, stew/io2,
@@ -30,22 +32,19 @@ proc importRlpBlock*(blocksRlp: openArray[byte]; com: CommonRef; importFile: str
while rlp.hasData:
try:
rlp.decompose(header, body)
if chain.persistBlocks([header], [body]) == ValidationResult.Error:
# register one more error and continue
errorCount.inc
except RlpError as e:
# terminate if there was a decoding error
error "rlp error",
fileName = importFile,
msg = e.msg,
exception = e.name
return false
except CatchableError as e:
# otherwise continue

chain.persistBlocks([header], [body]).isOkOr():
# register one more error and continue
error "import error",
fileName = importFile,
msg = e.msg,
exception = e.name
error
errorCount.inc

return errorCount == 0
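
The reworked loop above counts per-block persist failures and keeps going, while an RLP decode error still aborts the file. A self-contained sketch of the counting half, assuming the `results` package; `persistStub` and the sample inputs are invented:

```nim
import results

proc persistStub(blk: string): Result[void, string] =
  if blk == "bad":
    return err("persist failed")
  ok()

proc importAll(blocks: seq[string]): bool =
  var errorCount = 0
  for blk in blocks:
    persistStub(blk).isOkOr:
      # register one more error and keep going with the next block
      echo "import error: ", error
      errorCount.inc
  errorCount == 0

when isMainModule:
  echo importAll(@["a", "b", "c"])     # true
  echo importAll(@["a", "bad", "c"])   # false, but every block was attempted
```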
7 changes: 1 addition & 6 deletions nimbus/core/chain/chain_desc.nim
@@ -41,24 +41,19 @@ type
## First block to when `extraValidation` will be applied (only
## effective if `extraValidation` is true.)

vmState: BaseVMState
## If it's not nil, block validation will use this
## If it's nil, a new vmState state will be created.

# ------------------------------------------------------------------------------
# Public constructors
# ------------------------------------------------------------------------------

proc newChain*(com: CommonRef,
extraValidation: bool, vmState = BaseVMState(nil)): ChainRef =
extraValidation: bool): ChainRef =
## Constructor for the `Chain` descriptor object.
## The argument `extraValidation` enables extra block
## chain validation if set `true`.
ChainRef(
com: com,
validateBlock: true,
extraValidation: extraValidation,
vmState: vmState,
)

func newChain*(com: CommonRef): ChainRef =